import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # As I mentioned in the MP1 assignment, I did these assignments out of order, so this is actually the data # I will be using for my mini project. # I connected to the Twitter API to examine what topics are trending in the Seattle area. This is interesting to me # as a Twitter user and Seattle local. # I looked at this page for reference on how to access the Twitter API: http://socialmedia-class.org/twittertutorial.html import json import tweepy from kaggle_secrets import UserSecretsClient user_secrets = UserSecretsClient() CONSUMER_KEY = user_secrets.get_secret("TwitterKey") CONSUMER_SECRET = user_secrets.get_secret("TwitterSecretKey") ACCESS_SECRET = user_secrets.get_secret("TwitterSecretToken") ACCESS_TOKEN = user_secrets.get_secret("TwitterToken") auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET) api = tweepy.API( auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True ) # this prints the tweets on my home page: for status in tweepy.Cursor(api.home_timeline).items(200): print(status._json) # this prints the trending topics for a location based on the WOEID: sea_trends = api.trends_place(id=2490383) print(json.dumps(sea_trends, indent=4)) with open("sea_trends.txt", "w") as outfile: json.dump(sea_trends, outfile, indent=4) for dirname, _, filenames in os.walk("/kaggle/working"): for filename in filenames: print(os.path.join(dirname, filename)) # this is as far as I could get with making a data frame out of the above trend data. # it seems like it's reading my data as a list, so it can't parse it? I don't know how to fix it. import matplotlib.pyplot as plt import pandas as pd df = pd.read_json("/kaggle/working/sea_trends.txt", orient="split") df # since I can't get my API to work, here are some visualizations of some Home Price Index data I found in Kaggle import pandas as pd HPI = pd.read_csv("../input/hpindex/HPI_master.csv") HPI HPI.plot(kind="scatter", figsize=(10, 10), x="yr", y="index_nsa", alpha=0.3, c="Purple") HPI.plot(kind="scatter", figsize=(10, 10), x="yr", y="index_nsa", alpha=0.3, c="Purple") hist = HPI.hist(figsize=(12, 8)) HPI["index_nsa"].mean # not sure why this doesn't work. Trying to show year on the x axis and the mean index_nsa on the y axis mean = HPI["index_nsa"].mean HPI.plot(kind="scatter", figsize=(10, 10), x="yr", y=mean, alpha=0.3, c="Purple")
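# A possible fix for the two problems noted above (a sketch, not verified against this
# exact environment): the saved trends file holds a list containing a single dict, so
# read_json(orient="split") cannot parse it -- loading the JSON and building a frame from
# its "trends" entry works instead. For the HPI plot, .mean needs parentheses, and grouping
# by year gives the per-year mean index_nsa the last cell was aiming for.
import json
import pandas as pd

with open("/kaggle/working/sea_trends.txt") as infile:
    sea_trends = json.load(infile)                   # a list containing one dict
trends_df = pd.DataFrame(sea_trends[0]["trends"])    # columns like name, url, tweet_volume
trends_df.head()

HPI = pd.read_csv("../input/hpindex/HPI_master.csv")
yearly_mean = HPI.groupby("yr")["index_nsa"].mean()  # note the () -- .mean without them is just the method object
yearly_mean.plot(kind="line", figsize=(10, 10), c="Purple")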
import numpy as np import cv2 import matplotlib.pyplot as plt from tensorflow.keras.models import Model from tensorflow.keras.layers import ( Input, Conv2D, MaxPooling2D, Dropout, concatenate, Conv2DTranspose, UpSampling2D, ) from tensorflow.keras.optimizers import Adam from tensorflow.keras.metrics import MeanIoU from tensorflow.keras import backend as K from tensorflow.keras.callbacks import LambdaCallback import os from tqdm import tqdm # # Reading Data train_dir = "/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TRAIN" train_mask_dir = ( "/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TRAIN_masks" ) test_dir = "/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TEST" test_mask_dir = ( "/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TEST_masks" ) def read_resize_img(img_path, img_size): img = cv2.imread(img_path) img = cv2.resize(img, img_size) return img def load_image(img_dir, mask_dir): X = [] y = [] for img_name in tqdm(os.listdir(img_dir)): img_path = os.path.join(img_dir, img_name) mask_path = os.path.join(mask_dir, img_name.replace(".jpg", ".png")) img = read_resize_img(img_path, (128, 128))[:, :, 0] mask = read_resize_img(mask_path, (128, 128))[:, :, 0] X.append(img) y.append(mask) return np.array(X).reshape(-1, 128, 128, 1), np.array(y) / 255 X_train, y_train = load_image(train_dir, train_mask_dir) X_test, y_test = load_image(test_dir, test_mask_dir) # # Modelling def unet(input_shape): # Define the U-Net model inputs = Input(input_shape) # Downsample path conv1 = Conv2D(64, 3, activation="relu", padding="same")(inputs) conv1 = Conv2D(64, 3, activation="relu", padding="same")(conv1) pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = Conv2D(128, 3, activation="relu", padding="same")(pool1) conv2 = Conv2D(128, 3, activation="relu", padding="same")(conv2) pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) conv3 = Conv2D(256, 3, activation="relu", padding="same")(pool2) conv3 = Conv2D(256, 3, activation="relu", padding="same")(conv3) pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) conv4 = Conv2D(512, 3, activation="relu", padding="same")(pool3) conv4 = Conv2D(512, 3, activation="relu", padding="same")(conv4) drop4 = Dropout(0.5)(conv4) pool4 = MaxPooling2D(pool_size=(2, 2))(drop4) # Bottleneck conv5 = Conv2D(1024, 3, activation="relu", padding="same")(pool4) conv5 = Conv2D(1024, 3, activation="relu", padding="same")(conv5) drop5 = Dropout(0.5)(conv5) # Upsample path up6 = UpSampling2D(size=(2, 2))(drop5) up6 = Conv2D(512, 2, activation="relu", padding="same")(up6) merge6 = concatenate([drop4, up6], axis=3) conv6 = Conv2D(512, 3, activation="relu", padding="same")(merge6) conv6 = Conv2D(512, 3, activation="relu", padding="same")(conv6) up7 = UpSampling2D(size=(2, 2))(conv6) up7 = Conv2D(256, 2, activation="relu", padding="same")(up7) merge7 = concatenate([conv3, up7], axis=3) conv7 = Conv2D(256, 3, activation="relu", padding="same")(merge7) conv7 = Conv2D(256, 3, activation="relu", padding="same")(conv7) up8 = UpSampling2D(size=(2, 2))(conv7) up8 = Conv2D(128, 2, activation="relu", padding="same")(up8) merge8 = concatenate([conv2, up8], axis=3) conv8 = Conv2D(128, 3, activation="relu", padding="same")(merge8) conv8 = Conv2D(128, 3, activation="relu", padding="same")(conv8) up9 = UpSampling2D(size=(2, 2))(conv8) up9 = Conv2D(64, 2, activation="relu", padding="same")(up9) merge9 = concatenate([conv1, up9], axis=3) conv9 = Conv2D(64, 3, activation="relu", padding="same")(merge9) conv9 = Conv2D(64, 3, activation="relu", 
padding="same")(conv9) conv9 = Conv2D(1, 3, activation="relu", padding="same")(conv9) # Output output = Conv2D(1, 1, activation="sigmoid")(conv9) # Define the model model = Model(inputs=inputs, outputs=output) return model try: del model except: print("model is not defined") model = unet(X_train.shape[1:]) def dice_coef(y_true, y_pred): smooth = 1 y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) model.compile( optimizer=Adam(learning_rate=0.0001), loss="binary_crossentropy", metrics=[dice_coef], ) model.fit(X_train, y_train, epochs=80, validation_data=(X_test, y_test)) model.save("./UNET-Eh.h5") # Download File for i in range(10): fig, ax = plt.subplots(1, 5, figsize=(10, 8)) mask_pred = np.squeeze(model.predict(np.expand_dims(X_test[i], axis=0)), axis=0) ax[0].imshow(X_test[i], cmap="gray") ax[0].set_title("Original Scan") ax[1].imshow(y_test[i], cmap="gray") ax[1].set_title("Actual Mask") ax[2].imshow(mask_pred, cmap="gray") ax[2].set_title("Pred Mask") ax[3].imshow(X_test[i], cmap="gray") ax[3].imshow(y_test[i], alpha=0.5, cmap="gray") ax[3].set_title("Scan with Original Mask") ax[4].imshow(X_test[i], cmap="gray") ax[4].imshow(mask_pred, alpha=0.5, cmap="gray") ax[4].set_title("Scan with Pred. Mask") plt.tight_layout() plt.show() from matplotlib.colors import ListedColormap for i in range(10): fig, ax = plt.subplots(1, 5, figsize=(10, 8)) mask_pred = np.squeeze(model.predict(np.expand_dims(X_test[i], axis=0)), axis=0) cmap = ListedColormap(["black", "red"]) ax[0].imshow(X_test[i], cmap="gray") ax[0].set_title("Original Scan") ax[1].imshow(y_test[i], cmap=cmap) ax[1].set_title("Actual Mask") ax[2].imshow(mask_pred, cmap=cmap) ax[2].set_title("Pred Mask") ax[3].imshow(X_test[i], cmap="gray") ax[3].imshow(y_test[i], alpha=0.5, cmap=cmap) ax[3].set_title("Scan with Original Mask") ax[4].imshow(X_test[i], cmap="gray") ax[4].imshow(mask_pred, alpha=0.5, cmap=cmap) ax[4].set_title("Scan with Pred. Mask") plt.tight_layout() plt.show()
# **Code for importing the dataset** import pandas as pd Life_Expectancy_Data = pd.read_csv("../input/life-expectancy/Life Expectancy Data.csv") Life_Expectancy_Data # **Listing the dataset information which contains number of rows and columns, data types count and memory usage by the dataset.** Life_Expectancy_Data.info() # **Listing the columns in the dataset** Life_Expectancy_Data.columns # **From the above output it is clearly known that few column names have extra spaces and some special characters. We have to clean them inorder to make it appropriate for future use. # I have used strip () method to remove the beginning and trailing spaces in the column names.** Life_Expectancy_Data.columns = Life_Expectancy_Data.columns.str.strip() Life_Expectancy_Data.columns # **To remove the space inbetween the column names and replace it with _ (underscore), I use replace() method.** Life_Expectancy_Data.columns = Life_Expectancy_Data.columns.str.replace(" ", "_") Life_Expectancy_Data.columns # **Since the column 'HIV/AIDS' have special charcter / , I rename it to HIV by using rename() method** Life_Expectancy_Data.rename(columns={"HIV/AIDS": "HIV"}, inplace=True) Life_Expectancy_Data.columns
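# The three cleanup steps above can be folded into one reusable helper; this is just a
# compact restatement of the same strip / replace / rename logic on the same dataframe:
def clean_columns(df):
    df = df.copy()
    df.columns = df.columns.str.strip().str.replace(" ", "_")  # trim, then spaces -> underscores
    return df.rename(columns={"HIV/AIDS": "HIV"})

Life_Expectancy_Data = clean_columns(Life_Expectancy_Data)
Life_Expectancy_Data.columns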
# # **Point Couds with zarr** # Generate Point Clouds while leveraging efficient image loading with zarr. # Credit: https://www.kaggle.com/code/brettolsen/efficient-image-loading-with-zarr/notebookimport os import os import shutil import time import PIL.Image as Image from glob import glob from tifffile import tifffile import numpy as np import PIL.Image as Image import torch.utils.data as data import matplotlib.pyplot as plt import matplotlib.patches as patches from tqdm import tqdm from ipywidgets import interact, fixed import zarr import open3d as o3 INPUT_FOLDER = "/kaggle/input/vesuvius-challenge-ink-detection" WORKING_FOLDER = "/kaggle/working/" TEMP_FOLDER = "kaggle/temp/" class TimerError(Exception): pass class Timer: def __init__(self, text=None): if text is not None: self.text = text + ": {:0.4f} seconds" else: self.text = "Elapsed time: {:0.4f} seconds" def logfunc(x): print(x) self.logger = logfunc self._start_time = None def start(self): if self._start_time is not None: raise TimerError("Timer is already running. Use .stop() to stop it.") self._start_time = time.time() def stop(self): if self._start_time is None: raise TimerError("Timer is not running. Use .start() to start it.") elapsed_time = time.time() - self._start_time self._start_time = None if self.logger is not None: self.logger(self.text.format(elapsed_time)) return elapsed_time def __enter__(self): self.start() return self def __exit__(self, exc_type, exc_value, exc_traceback): self.stop() class FragmentImageException(Exception): pass class FragmentImageData: """A general class that uses persistent zarr objects to store the surface volume data, binary data mask, and for training sets, the truth data and infrared image of a papyrus fragment, in a compressed and efficient way. 
""" def __init__(self, sample_type: str, sample_index: str, working: bool = True): if sample_type not in ("test, train"): raise FragmentImageException( f"Invalid sample type f{sample_type}, must be one of 'test' or 'train'" ) zarrpath = self._zarr_path(sample_type, sample_index, working) if os.path.exists(zarrpath): self.zarr = self.load_from_zarr(zarrpath) else: dirpath = os.path.join(INPUT_FOLDER, sample_type, sample_index) if not os.path.exists(dirpath): raise FragmentImageException( f"No input data found at f{zarrpath} or f{dirpath}" ) self.zarr = self.load_from_directory(dirpath, zarrpath) @property def surface_volume(self): return self.zarr.surface_volume @property def mask(self): return self.zarr.mask @property def truth(self): return self.zarr.truth @property def infrared(self): return self.zarr.infrared @staticmethod def _zarr_path(sample_type: str, sample_index: str, working: bool = True): filename = f"{sample_type}-{sample_index}.zarr" if working: return os.path.join(WORKING_FOLDER, filename) else: return os.path.join(TEMP_FOLDER, filename) @staticmethod def clean_zarr(sample_type: str, sample_index: str, working: bool = True): zarrpath = FragmentImageData._zarr_path(sample_type, sample_index, working) if os.path.exists(zarrpath): shutil.rmtree(zarrpath) @staticmethod def load_from_zarr(filepath): with Timer("Loading from existing zarr"): return zarr.open(filepath, mode="r") @staticmethod def load_from_directory(dirpath, zarrpath): if os.path.exists(zarrpath): raise FragmentImageException( f"Trying to overwrite existing zarr at f{zarrpath}" ) # Initialize the root zarr group and write the file root = zarr.open_group(zarrpath, mode="w") # Load in the surface volume tif files with Timer("Surface volume loading"): init = True imgfiles = sorted( [ imgfile for imgfile in os.listdir(os.path.join(dirpath, "surface_volume")) ] ) for imgfile in imgfiles: print(f"Loading file {imgfile}", end="\r") img_data = np.array( Image.open(os.path.join(dirpath, "surface_volume", imgfile)) ) if init: surface_volume = root.zeros( name="surface_volume", shape=(img_data.shape[0], img_data.shape[1], len(imgfiles)), chunks=(1000, 1000, 4), dtype=img_data.dtype, write_empty_chunks=False, ) init = False z_index = int(imgfile.split(".")[0]) surface_volume[:, :, z_index] = img_data # Load in the mask with Timer("Mask loading"): img_data = np.array( Image.open(os.path.join(dirpath, "mask.png")), dtype=bool ) mask = root.array( name="mask", data=img_data, shape=img_data.shape, chunks=(1000, 1000), dtype=img_data.dtype, write_empty_chunks=False, ) # Load in the truth set (if it exists) with Timer("Truth set loading"): truthfile = os.path.join(dirpath, "inklabels.png") if os.path.exists(truthfile): img_data = np.array(Image.open(truthfile), dtype=bool) truth = root.array( name="truth", data=img_data, shape=img_data.shape, chunks=(1000, 1000), dtype=img_data.dtype, write_empty_chunks=False, ) # Load in the infrared image (if it exists) with Timer("Infrared image loading"): irfile = os.path.join(dirpath, "ir.png") if os.path.exists(irfile): img_data = np.array(Image.open(irfile)) infrared = root.array( name="infrared", data=img_data, shape=img_data.shape, chunks=(1000, 1000), dtype=img_data.dtype, write_empty_chunks=False, ) return root # # Load data FragmentImageData.clean_zarr("train", 1) data = FragmentImageData("train", "1") print(data.surface_volume.info) print(data.mask.info) print(data.truth.info) print(data.infrared.info) with Timer(): plt.imshow(data.mask, cmap="gray") with Timer(): 
plt.imshow(data.surface_volume[:, :, 20], cmap="gray") # ### Plot vertical slices of the surface volumes with Timer(): plt.figure(figsize=(10, 1)) plt.imshow(data.surface_volume[2000, :, :].T, cmap="gray", aspect="auto") with Timer(): plt.figure(figsize=(10, 1)) plt.imshow(data.surface_volume[:, 2000, :].T, cmap="gray", aspect="auto") # # Dsiplay Voxels data.surface_volume.shape voxelarray = np.array(data.surface_volume[1000:2000, 1000:2000, 0:10]) type(voxelarray) voxelarray.shape ax = plt.figure().add_subplot(projection="3d") # ax.voxels(voxelarray, facecolors=colors, edgecolor='k') ax.voxels(voxelarray, edgecolor="k") # # Create Point Cloud # ## Sample from Surface Volumes data.surface_volume.shape ROWS = data.surface_volume.shape[0] COLS = data.surface_volume.shape[1] Z_DIM = data.surface_volume.shape[2] # number of volume slices N_SAMPLES = 1000 type(np.ravel(data.mask)) with Timer(): # sample from valid regions of surface volume c = np.ravel(data.mask).cumsum() samples = np.random.uniform(low=0, high=c[-1], size=(N_SAMPLES, Z_DIM)).astype(int) # get valid indexes x, y = np.unravel_index(c.searchsorted(samples), data.mask.shape) x, y = x[np.newaxis, ...], y[np.newaxis, ...] # get z dimensions from surface volume locations z = np.arange(0, Z_DIM) z = np.tile(z, N_SAMPLES).reshape(N_SAMPLES, -1)[np.newaxis, ...] # get point cloud xyz = np.vstack((x, y, z)) xyz.shape # ### Get Normalized Intensities intensities = np.zeros((N_SAMPLES, Z_DIM)) with Timer(): for i in range(Z_DIM): img = data.surface_volume[:, :, i] intensities[:, i] = img[xyz[0, :, i], xyz[1, :, i]] / 65535.0 intensities = intensities.astype(np.float32) # #### Sanity Check print(xyz[:, 20, 1], intensities[20, 1]) print( xyz.T.reshape((-1, 3))[20 + N_SAMPLES, :], intensities.T.reshape((-1))[20 + N_SAMPLES], ) # ### Reshape and Normalize xyz = xyz.T.reshape((-1, 3)) xyz = xyz / xyz.max(axis=0) intensities = intensities.T.reshape((-1)).repeat((3)).reshape((-1, 3)) # ## Get Colormap and Convert to Point Cloud colors = plt.get_cmap("bone") # also use 'cool', 'bone' colors pcd = o3.geometry.PointCloud() pcd.points = o3.utility.Vector3dVector(xyz) pcd.colors = o3.utility.Vector3dVector(colors(intensities)[:, 0, :3]) pcd # # Display Point Cloud o3.visualization.draw_plotly([pcd]) voxel_grid voxel_grid = o3.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.005) o3.visualization.draw_plotly([voxel_grid]) o3.visualization.draw_geometries([voxel_grid]) idx = 0 cnts, bins, _ = plt.hist( np.ravel(data.surface_volume[:, :, idx][data.mask]) / 65535.0, bins=100 ) plt.savefig(os.path.join(WORKING_FOLDER, f"hist_{idx}.png")) # # Animate intensity Histograms for each surface layer # Animation code resused from: https://www.kaggle.com/code/leonidkulyk/eda-vc-id-volume-layers-animation from celluloid import Camera fig, ax = plt.subplots(1, 1) camera = Camera(fig) # define the camera that gets the fig we'll plot for i in range(Z_DIM): cnts, bins, _ = plt.hist( np.ravel(data.surface_volume[:, :, i][data.mask]) / 65535.0, bins=100 ) ax.set_title(f"Surfacer Layer: {i}") camera.snap() # the camera takes a snapshot of the plot plt.close(fig) # close figure animation = camera.animate() # get plt animation fix_video_adjust = ( "<style> video {margin: 0px; padding: 0px; width:100%; height:auto;} </style>" ) display(HTML(fix_video_adjust + animation.to_html5_video())) # displaying the animation
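# Two small fixes for the cells above (a sketch): the bare `voxel_grid` reference comes
# before the variable is assigned, so it raises a NameError, and the animation cell calls
# display(HTML(...)) without importing either name.
from IPython.display import HTML, display   # needed by the animation cell above

voxel_grid = o3.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.005)
o3.visualization.draw_plotly([voxel_grid])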
import os from tqdm.auto import tqdm import time, gc import numpy as np import pandas as pd # pd.set_option('display.max_columns', None) from matplotlib import pyplot as plt import cv2 import albumentations as A from keras.preprocessing.image import ImageDataGenerator from keras.models import Model, Input, load_model from keras.layers import Dense, Conv2D, Flatten, Activation, Concatenate from keras.layers import MaxPool2D, AveragePooling2D, GlobalAveragePooling2D from keras.layers import Dropout, BatchNormalization from keras.optimizers import Adam from keras import backend as K from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping from keras.initializers import RandomNormal from keras.applications import DenseNet121 from sklearn.model_selection import train_test_split start_time = time.time() # ## Resource path setting dataset = "/kaggle/input/bengaliai-cv19" pretrained = "../input/bangla-graphemes-pretrained-weights" # ## Checking Model if os.path.isfile( os.path.join(pretrained, "GraphemeDenseNet121.h5") ) and os.path.isfile(os.path.join(pretrained, "hist.csv")): print("Model is present") else: print("Error. No Model Found") # ## Size and Channel of images SIZE = 112 # input image size N_ch = 1 # ## Loading Pretrained Densenet121 Model # ### Batch Size: 256 # ### Epochs: 30 (Early Stopped in 20) model = load_model(os.path.join(pretrained, "GraphemeDenseNet121.h5")) # ## DenseNet121 Model Summary model.summary() # ## Loading Images and Pre-processing # Resize image size def resize(df, size=112): resized = {} resize_size = 112 angle = 0 for i in range(df.shape[0]): image = df.loc[df.index[i]].values.reshape(137, 236) # Centering image_center = tuple(np.array(image.shape[1::-1]) / 2) matrix = cv2.getRotationMatrix2D(image_center, angle, 1.0) image = cv2.warpAffine( image, matrix, image.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0), ) # Scaling matrix = cv2.getRotationMatrix2D(image_center, 0, 1.0) image = cv2.warpAffine( image, matrix, image.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0), ) # Removing Blur # aug = A.GaussianBlur(p=1.0) # image = aug(image=image)['image'] # Noise Removing # augNoise=A.MultiplicativeNoise(p=1.0) # image = augNoise(image=image)['image'] # Removing Distortion # augDist=A.ElasticTransform(sigma=50, alpha=1, alpha_affine=10, p=1.0) # image = augDist(image=image)['image'] # Brightness augBright = A.RandomBrightnessContrast(p=1.0) image = augBright(image=image)["image"] _, thresh = cv2.threshold( image, 30, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU ) contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[ -2: ] idx = 0 ls_xmin = [] ls_ymin = [] ls_xmax = [] ls_ymax = [] for cnt in contours: idx += 1 x, y, w, h = cv2.boundingRect(cnt) ls_xmin.append(x) ls_ymin.append(y) ls_xmax.append(x + w) ls_ymax.append(y + h) xmin = min(ls_xmin) ymin = min(ls_ymin) xmax = max(ls_xmax) ymax = max(ls_ymax) roi = image[ymin:ymax, xmin:xmax] resized_roi = cv2.resize( roi, (resize_size, resize_size), interpolation=cv2.INTER_AREA ) # image=affine_image(image) # image= crop_resize(image) # image = cv2.resize(image,(size,size),interpolation=cv2.INTER_AREA) # image=resize_image(image,(64,64)) # image = cv2.resize(image,(size,size),interpolation=cv2.INTER_AREA) # gaussian_3 = cv2.GaussianBlur(image, (5,5), cv2.BORDER_DEFAULT) #unblur # image = cv2.addWeighted(image, 1.5, gaussian_3, -0.5, 0, image) # kernel = np.array([[-1,-1,-1], [-1,9,-1], 
[-1,-1,-1]]) #filter # image = cv2.filter2D(image, -1, kernel) # ret,image = cv2.threshold(image, 128, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) resized[df.index[i]] = resized_roi.reshape(-1) resized_df = pd.DataFrame(resized).T return resized_df # ## Accuracy and Loss Curve df = pd.read_csv(os.path.join(pretrained, "hist.csv")) # Plot the loss and accuracy curves for training and validation fig, ax = plt.subplots(1, 2, figsize=(12, 4)) ax[0].plot( df[ [ "root_loss", "vowel_loss", "consonant_loss", "val_root_loss", "val_vowel_loss", "val_consonant_loss", ] ] ) ax[0].set_ylim(0, 2) ax[0].set_title("Loss") ax[0].legend( [ "train_root_loss", "train_vowel_loss", "train_conso_loss", "val_root_loss", "val_vowel_loss", "val_conso_loss", ], loc="upper right", ) ax[0].grid() ax[1].plot( df[ [ "root_acc", "vowel_acc", "consonant_acc", "val_root_acc", "val_vowel_acc", "val_consonant_acc", ] ] ) ax[1].set_ylim(0.5, 1) ax[1].set_title("Accuracy") ax[1].legend( [ "train_root_acc", "train_vowel_acc", "train_conso_acc", "val_root_acc", "val_vowel_acc", "val_conso_acc", ], loc="lower right", ) ax[1].grid() # ## Target Columns tgt_cols = ["grapheme_root", "vowel_diacritic", "consonant_diacritic"] # ## Prediction on Test Images row_ids = [] targets = [] id = 0 for i in range(4): img_df = pd.read_parquet( os.path.join(dataset, "test_image_data_" + str(i) + ".parquet") ) img_df = img_df.drop("image_id", axis=1) img_df = resize(img_df, SIZE) / 255 X_test = img_df.values.reshape(-1, SIZE, SIZE, N_ch) preds = model.predict(X_test) for j in range(len(X_test)): for k in range(3): row_ids.append("Test_" + str(id) + "_" + tgt_cols[k]) targets.append(np.argmax(preds[k][j])) id += 1 # ## Creating Submission CSV File df_submit = pd.DataFrame( {"row_id": row_ids, "target": targets}, columns=["row_id", "target"] ) df_submit.to_csv("submission.csv", index=False) df_submit.head(10)
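# The prediction loop above assumes the model's three output heads come back in the same
# order as tgt_cols; printing the output-layer names (whatever the pretrained model calls
# them) is a cheap sanity check. gc is imported at the top but never used -- collecting
# after the loop (or after each parquet chunk) releases the large intermediate arrays.
import gc

print(model.output_names)   # order should correspond to grapheme_root / vowel_diacritic / consonant_diacritic

del img_df, X_test, preds   # drop the last chunk's arrays
gc.collect()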
# # Corona Virus 2019 # This Notebook have visualize info for Corona Virus spread all over the word also some insights from the data like which country has maximum spread and which country has max success rate in coronavirus cure. # # Insight # Thiland has max(more than 25% people are recoverd) recoverd ratio so the medicine used by them are effective. import pandas as pd data = pd.read_csv("../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv") data.info() # # Data Cleaning and EDA data["Date"] = pd.to_datetime(data.Date) data["Date"] = pd.DatetimeIndex(data["Date"]).date data["Date"] = data.Date.apply(str) data.groupby("Date").sum()["Confirmed"] c = data.groupby("Country").sum() c = c.drop(["Sno"], axis=1) c.style.background_gradient(cmap="rainbow") print("Total number of Corona Virus Confirmed Case are " + str(sum(data.Confirmed))) print("Total number of Corona Virus Deaths are " + str(sum(data.Deaths))) print("Total number of Corona Virus Recovered " + str(sum(data.Recovered))) # ## Visualization import seaborn as sns import matplotlib.pyplot as plt fig_size = plt.rcParams["figure.figsize"] fig_size[0] = 10 fig_size[1] = 8 plt.rcParams["figure.figsize"] = fig_size groupedvalues = data.groupby("Date").sum().reset_index() sns.set(style="whitegrid") sns.barplot(x="Confirmed", y="Date", data=groupedvalues) plt.title( "Number of Confirmed Corona Virus cases each day from 22-01-2020 to 18-02-2020" ) sns.barplot(x="Deaths", y="Date", data=groupedvalues) plt.title("Number of Deaths due to Corona Virus from 22-01-2020 to 18-02-2020") sns.barplot(x="Recovered", y="Date", data=groupedvalues) plt.title("Number of Recovered cases from 22-01-2020 to 18-02-2020") groupedvalues = groupedvalues.drop(["Sno"], axis=1) df = groupedvalues.melt(id_vars=["Date"], var_name="Type", value_name="NumberOfPeople") fig, ax = plt.subplots() sns.set(style="whitegrid") sns.barplot(x="Date", y="NumberOfPeople", data=df, hue="Type") fig.autofmt_xdate() # ## Corona Virus Spread On World Map import plotly.express as px groupedvalues = data.groupby("Country").sum().reset_index() groupedvalues = groupedvalues.drop(["Sno"], axis=1) fig = px.scatter_geo( groupedvalues, locations="Country", locationmode="country names", color="Confirmed", hover_name="Country", range_color=[0, 20], projection="natural earth", title="Spread across the world", ) fig.update(layout_coloraxis_showscale=False) fig.show() # # Ratio of Recovered people in each country groupedvalues["RecoveredRatio"] = (groupedvalues["Recovered"] * 100) / groupedvalues[ "Confirmed" ] fig, ax = plt.subplots() sns.set(style="whitegrid") sns.barplot(x="Country", y="RecoveredRatio", data=groupedvalues) fig.autofmt_xdate()
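# The three seaborn bar plots above are drawn one after another without opening a new
# figure, so the Deaths and Recovered bars end up overlaid on the Confirmed axes. A sketch
# that gives each metric its own figure, using the same daily grouping:
daily = data.groupby("Date").sum().reset_index()
for col in ["Confirmed", "Deaths", "Recovered"]:
    plt.figure(figsize=(10, 8))
    sns.barplot(x=col, y="Date", data=daily)
    plt.title(f"Number of {col} cases from 22-01-2020 to 18-02-2020")
    plt.show()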
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import zipfile path = "/kaggle/input/dogs-vs-cats/train.zip" zip_ref = zipfile.ZipFile(path, "r") zip_ref.extractall("/kaggle/working/") path = "/kaggle/input/dogs-vs-cats/test1.zip" zip_ref = zipfile.ZipFile(path, "r") zip_ref.extractall("/kaggle/working/") zip_ref.close() # import pandas as pd # sampleSubmission = pd.read_csv("../input/dogs-vs-cats/sampleSubmission.csv") # print(sampleSubmission) import os dirname = "/kaggle/working/test1/dog" os.mkdir(dirname) dirname = "/kaggle/working/test1/cat" os.mkdir(dirname) dirname = "/kaggle/working/train/dog" os.mkdir(dirname) dirname = "/kaggle/working/train/cat" os.mkdir(dirname) dirname = "/kaggle/working/validation" os.mkdir(dirname) dirname = "/kaggle/working/validation/cat" os.mkdir(dirname) dirname = "/kaggle/working/validation/dog" os.mkdir(dirname) # for dirname, _, filenames in os.walk('/kaggle/working/test1'): # print(filenames) import shutil for dirname, _, filenames in os.walk("/kaggle/working/train"): for filename in filenames: if filename[:3] == "dog": dog_path = os.path.join(dirname, filename) shutil.move(dog_path, "/kaggle/working/train/dog/" + filename) if filename[:3] == "cat": cat_path = os.path.join(dirname, filename) shutil.move(cat_path, "/kaggle/working/train/cat/" + filename) import shutil for dirname, _, filenames in os.walk("/kaggle/working/train/cat/"): i = 0 for filename in filenames: if i < 1300: dog_path = os.path.join(dirname, filename) shutil.move(dog_path, "/kaggle/working/validation/cat/" + filename) i = i + 1 for dirname, _, filenames in os.walk("/kaggle/working/train/dog/"): i = 0 for filename in filenames: if i < 1300: dog_path = os.path.join(dirname, filename) shutil.move(dog_path, "/kaggle/working/validation/dog/" + filename) i = i + 1 # for dirname, _, filenames in os.walk('/kaggle/working/train/cat/'): # print(filenames) # **Begin Algorithm** import tensorflow as tf import os import zipfile from os import path, getcwd, chdir def train_model(): model = tf.keras.models.Sequential( [ tf.keras.layers.Conv2D( 16, (3, 3), input_shape=(64, 64, 3), activation="relu" ), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3, 3), activation="relu"), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation="relu"), tf.keras.layers.Dense(1, activation="sigmoid"), ] ) from tensorflow.keras.optimizers import RMSprop model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) from tensorflow.keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale=1 / 255) train_generator = train_datagen.flow_from_directory( "/kaggle/working/train/", target_size=(64, 64), batch_size=500, class_mode="binary", ) validation_generator = train_datagen.flow_from_directory( "/kaggle/working/validation/", target_size=(64, 64), batch_size=50, class_mode="binary", ) history = model.fit_generator( train_generator, epochs=100, # steps_per_epoch=10, validation_data=validation_generator, verbose=1, ) model.save("model_loc.h5") return 
history.history["acc"][-1] train_model()
# **Medical Cost Prediction** # ## *Load the data* import pandas as pd import numpy as np import matplotlib.pyplot as plt insurance = pd.read_csv("../input/insurance/insurance.csv") insurance.head(5) # # Explore dataset insurance.info() insurance.describe(include="all") insurance.hist(bins=50, figsize=(12, 8)) plt.show() # ## Split Train-Test # ## We need to stratify train and test sets in order to make the data representative insurance["age_cat"] = pd.cut( insurance["age"], bins=[0.0, 20.0, 30.0, 40.0, 50.0, 60.0, np.inf], labels=[1, 2, 3, 4, 5, 6], ) insurance["age_cat"].value_counts().sort_index().plot.bar(rot=0, grid=True) plt.xlabel("Age category") plt.ylabel("Number of Patients") plt.show() from sklearn.model_selection import train_test_split strat_train_set, strat_test_set = train_test_split( insurance, test_size=0.2, stratify=insurance["age_cat"], random_state=42 ) strat_test_set["age_cat"].value_counts() / len(strat_test_set) insurance["age_cat"].value_counts() / len(insurance) # # Visualize dataset # from pandas.plotting import scatter_matrix attrs = ["age", "bmi", "charges"] scatter_matrix(strat_train_set[attrs], figsize=(12, 8)) plt.show() corr_matrix = strat_train_set.corr() corr_matrix["charges"].sort_values(ascending=False) # it looks like their can be a linear relationship between age, bmi and charges test_with_log_charges = strat_train_set.copy() test_with_log_charges["log_charges"] = np.log(test_with_log_charges["charges"]) corr_matrix = test_with_log_charges.corr() corr_matrix["log_charges"].sort_values(ascending=False) # the log of labels has a much higher correlation with charges # can be useful too try later when training the model # # Preprocessing insurance = strat_train_set.drop(["charges"], axis=1) insurance_labels = np.log(test_with_log_charges["charges"]) from sklearn.compose import ( ColumnTransformer, make_column_selector, make_column_transformer, ) from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler, OneHotEncoder, FunctionTransformer num_pipeline = Pipeline( [ ("standardize", StandardScaler()), ] ) cat_pipeline = Pipeline([("onehot", OneHotEncoder(handle_unknown="ignore"))]) preprocessing = make_column_transformer( (num_pipeline, make_column_selector(dtype_include=np.number)), (cat_pipeline, make_column_selector(dtype_include=object)), ) insurance_prepared = preprocessing.fit_transform(insurance) target_scaler = StandardScaler() scaled_labels = target_scaler.fit_transform(insurance_labels.to_frame()) view_prep = pd.DataFrame( insurance_prepared, columns=preprocessing.get_feature_names_out(), index=insurance.index, ) view_prep # # Model Selection from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error lig_reg = Pipeline([("preprocess", preprocessing), ("regressor", LinearRegression())]) lig_reg.fit(insurance, scaled_labels) insurance_predictions = lig_reg.predict(insurance) lin_rmse = mean_squared_error(scaled_labels, insurance_predictions, squared=False) print(f"Rmse = {lin_rmse}") # the mean error is pretty good. 
Let's try other regression models from sklearn.tree import DecisionTreeRegressor tree_reg = Pipeline( [ ("preprocess", preprocessing), ("regressor", DecisionTreeRegressor(random_state=42)), ] ) tree_reg.fit(insurance, scaled_labels) insurance_predictions = tree_reg.predict(insurance) tree_rmse = mean_squared_error(scaled_labels, insurance_predictions, squared=False) print(f"Rmse = {tree_rmse}") # Well desision tree is almost amazing from sklearn.ensemble import RandomForestRegressor random_reg = Pipeline( [ ("preprocess", preprocessing), ("regressor", RandomForestRegressor(random_state=42)), ] ) random_reg.fit(insurance, scaled_labels) insurance_predictions = random_reg.predict(insurance) random_rmse = mean_squared_error(scaled_labels, insurance_predictions, squared=False) print(f"Rmse = {random_rmse}") # Looks like the best model is the decision tree regressor let's use that in cross val from sklearn.model_selection import cross_val_score tree_rmses = -cross_val_score( tree_reg, insurance, scaled_labels, scoring="neg_root_mean_squared_error", cv=10 ) pd.Series(tree_rmses).describe() # Well turns out it highly overfitted the data let's check other methods from sklearn.model_selection import cross_val_score random_rmses = -cross_val_score( random_reg, insurance, scaled_labels, scoring="neg_root_mean_squared_error", cv=10 ) pd.Series(random_rmses).describe() from sklearn.model_selection import cross_val_score lin_rmses = -cross_val_score( lig_reg, insurance, scaled_labels, scoring="neg_root_mean_squared_error", cv=10 ) pd.Series(lin_rmses).describe() # Overall random_forest is the best # # Fine Tuning the model from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint full_pipeline = Pipeline( [ ("preprocessing", preprocessing), ("random_forest", RandomForestRegressor(random_state=42)), ] ) params_distribs = {"random_forest__max_features": randint(low=2, high=20)} rnd_search = RandomizedSearchCV( full_pipeline, param_distributions=params_distribs, n_iter=10, cv=3, scoring="neg_root_mean_squared_error", random_state=42, ) rnd_search.fit(insurance, scaled_labels) cv_res = pd.DataFrame(rnd_search.cv_results_) cv_res ## Feature importance and get the final model final_model = rnd_search.best_estimator_ feature_importances = final_model["random_forest"].feature_importances_ sorted( zip(feature_importances, final_model["preprocessing"].get_feature_names_out()), reverse=True, ) # **It looks like the most important features are : 'smoker', 'bmi'and 'age'** # # Testing X_test = strat_test_set.drop(["charges"], axis=1) y_test = strat_test_set["charges"].copy() scaled_y_test = target_scaler.fit_transform(np.log(y_test).to_frame()) scaled_final_predictions = final_model.predict(X_test) final_predictions = np.exp( target_scaler.inverse_transform(scaled_final_predictions.reshape(-1, 1)) ) final_rmse = mean_squared_error(y_test, final_predictions, squared=False) print(f"Final Rmse Test Score : {final_rmse}") final_model.score(X_test, scaled_y_test)
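# A note on the target handling above: scaled_y_test refits target_scaler on the test
# labels, which quietly changes the scaling the model was trained against -- transform()
# would be the safer call there. One way to avoid the manual log/scale/inverse bookkeeping
# altogether (a sketch using the same preprocessing pipeline and splits) is to wrap the
# regressor in TransformedTargetRegressor so the log transform and its inverse are applied
# automatically:
import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline

log_forest = TransformedTargetRegressor(
    regressor=Pipeline(
        [("preprocess", preprocessing), ("regressor", RandomForestRegressor(random_state=42))]
    ),
    func=np.log,
    inverse_func=np.exp,
)
log_forest.fit(insurance, strat_train_set["charges"])
test_rmse = mean_squared_error(y_test, log_forest.predict(X_test), squared=False)
print(f"RMSE on the original charges scale: {test_rmse}")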
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import pandas as pd complaints = pd.read_csv("../input/complaints.csv") # Print out the top 5 rows of the dataframe complaints.head() # Number of rows and columns in the df complaints.shape # Print out a list of all the column names in the df complaints.columns # Print the number of columns in the df len(complaints.columns) # Copy the dataframe with only a few of the columns # df1 = df[['a','b']] complaints_small = complaints[["Product", "Issue", "Company public response", "State"]] complaints_small.head() # df.groupby('age').size() complaints_small.groupby("State").size() # df.loc[df['column_name'] == some_value] complaints_small.loc[complaints_small["State"] == "WA"]
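# Two equivalent shortcuts for the last two cells (a sketch on the same complaints_small
# frame): value_counts() gives the per-state counts already sorted, and a boolean mask
# sum counts the Washington rows directly.
complaints_small["State"].value_counts()
(complaints_small["State"] == "WA").sum()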
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session from scipy.io import mmread import networkx as nx g1_scipy_sparse = mmread("../input/tdl-a1-dataset/graph1.mtx") G1 = nx.from_scipy_sparse_matrix(g1_scipy_sparse) # Compute statistics for graph1 num_nodes1 = G1.number_of_nodes() num_edges1 = G1.number_of_edges() degree_stats1 = nx.degree(G1) mean_degree1 = sum(dict(degree_stats1).values()) / num_nodes1 min_degree1 = min(dict(degree_stats1).values()) max_degree1 = max(dict(degree_stats1).values()) density1 = nx.density(G1) sparsity1 = 1 - density1 # Load graph2 from file g2_scipy_sparse = mmread("../input/tdl-a1-dataset/graph2.mtx") G2 = nx.from_scipy_sparse_matrix(g2_scipy_sparse) # Compute statistics for graph2 num_nodes2 = G2.number_of_nodes() num_edges2 = G2.number_of_edges() degree_stats2 = nx.degree(G2) mean_degree2 = sum(dict(degree_stats2).values()) / num_nodes2 min_degree2 = min(dict(degree_stats2).values()) max_degree2 = max(dict(degree_stats2).values()) density2 = nx.density(G2) sparsity2 = 1 - density2 # Print statistics for both graphs print("Graph1 statistics:") print("Number of nodes:", num_nodes1) print("Number of edges:", num_edges1) print("Mean degree:", mean_degree1) print("Minimum degree:", min_degree1) print("Maximum degree:", max_degree1) print("Density:", density1) print("Sparsity:", sparsity1) print("Graph2 statistics:") print("Number of nodes:", num_nodes2) print("Number of edges:", num_edges2) print("Mean degree:", mean_degree2) print("Minimum degree:", min_degree2) print("Maximum degree:", max_degree2) print("Density:", density2) print("Sparsity:", sparsity2) # Find 3-cliques and 4-cliques cliques3 = list(nx.find_cliques(G1)) cliques4 = [c for c in cliques3 if len(c) >= 4] # Compute node centrality centrality = nx.degree_centrality(G1) # Compute clustering coefficients clustering = nx.clustering(G1) # Print results print(f"Number of nodes: {G1.number_of_nodes()}") print(f"Number of edges: {G1.number_of_edges()}") print(f"Number of 3-cliques: {len(cliques3)}") print(f"Number of 4-cliques: {len(cliques4)}") print(f"Mean degree centrality: {sum(centrality.values()) / len(centrality)}") print(f"Max clustering coefficient: {max(clustering.values())}") # Find 3-cliques and 4-cliques cliques3 = list(nx.find_cliques(G2)) cliques4 = [c for c in cliques3 if len(c) >= 4] # Compute node centrality centrality = nx.degree_centrality(G2) # Compute clustering coefficients clustering = nx.clustering(G2) # Print results print(f"Number of nodes: {G2.number_of_nodes()}") print(f"Number of edges: {G2.number_of_edges()}") print(f"Number of 3-cliques: {len(cliques3)}") print(f"Number of 4-cliques: {len(cliques4)}") print(f"Mean degree centrality: {sum(centrality.values()) / len(centrality)}") print(f"Max clustering coefficient: {max(clustering.values())}") from node2vec import Node2Vec from sklearn.linear_model import LogisticRegression from sklearn.metrics import 
roc_auc_score, average_precision_score, f1_score from sklearn.model_selection import train_test_split # Learn node embeddings using Node2Vec node2vec = Node2Vec(G1, dimensions=16, walk_length=30, num_walks=200, workers=4) model1 = node2vec.fit() embeddings1 = {node: model1.wv.get_vector(node) for node in G1.nodes()} node2vec = Node2Vec(G2, dimensions=16, walk_length=30, num_walks=200, workers=4) model2 = node2vec.fit() embeddings2 = {node: model2.wv.get_vector(node) for node in G2.nodes()} # Split the edges of the graph datasets into train and test sets edges1 = list(G1.edges()) edges2 = list(G2.edges()) train_edges1, test_edges1 = train_test_split(edges1, test_size=0.2, random_state=42) train_edges2, test_edges2 = train_test_split(edges2, test_size=0.2, random_state=42) print(embeddings1[0]) print(type(embeddings1)) print(len(embeddings1)) # print(embeddings1.keys()) print(type(np.array(embeddings1.values()))) # use cosine similarity to predict new links from sklearn.metrics.pairwise import cosine_similarity feature_vectors = list(list(ele) for ele in embeddings1.values()) # print() # feauture_vectors = [list(arr) for arr in feature_vectors] similarity_matrix = cosine_similarity(feature_vectors) # set a threshold for similarity scores threshold = 0.9 # generate a list of new links with similarity scores above the threshold new_links = [] for i in range(len(similarity_matrix)): for j in range(i + 1, len(similarity_matrix)): if similarity_matrix[i, j] > threshold: new_links.append((i + 1, j + 1, similarity_matrix[i, j])) # print the predicted new links with their similarity scores print(new_links[:5]) predicted_edges = [] for link in new_links: if not G1.has_edge(link[0], link[1]): # print(link) predicted_edges.append(link) # # Evaluate the performance of the model using evaluation metrics auc_roc1 = roc_auc_score(y_test1, y_pred1) auc_pr1 = average_precision_score(y_test1, y_pred1) f1_score1 = f1_score(y_test1, y_pred1.round()) auc_roc2 = roc_auc_score(y_test2, y_pred2) auc_pr2 = average_precision_score(y_test2, y_pred2) f1_score2 = f1_score(y_test2, y_pred2.round()) print( f"Graph 1 link prediction results: AUC-ROC={auc_roc1:.4f}, AUC-PR={auc_pr1:.4f}, F1-score={f1_score1:.4f}" ) # print(f"Graph 2 link prediction results: AUC") from torch_geometric.utils import erdos_renyi_graph, to_networkx, from_networkx graph = from_networkx(G1) print(graph) import networkx as nx import matplotlib.pyplot as plt # create the KG G = nx.MultiDiGraph() G.add_node("Alice", entity_type="person", id="1") G.add_node("Bob", entity_type="person", id="2") G.add_node("Charlie", entity_type="person", id="3") G.add_node("David", entity_type="person", id="4") G.add_node("Eve", entity_type="person", id="5") # {"id": 6, "type": "Language", "name": "English"}, G.add_node("English", entity_type="language", id="6") G.add_node("Spanish", entity_type="language", id="7") G.add_node("Chinese", entity_type="language", id="8") # {"id": 9, "type": "Country", "name": "United States"}, G.add_node("United States", entity_type="country", id="9") G.add_node("Mexico", entity_type="country", id="10") G.add_node("China", entity_type="country", id="11") G.add_node("Software Engineer", entity_type="job", id="12") G.add_node("Data Scientist", entity_type="job", id="13") G.add_node("Cooking", entity_type="hobby", id="14") G.add_node("Hiking", entity_type="hobby", id="15") G.add_node("Reading", entity_type="hobby", id="16") G.add_node("Music", entity_type="hobby", id="17") # {"id": 18, "type": "Organization", "name": "Google"}, # {"id": 19, 
"type": "Organization", "name": "Microsoft"}, # {"id": 20, "type": "Organization", "name": "Amazon"} G.add_node("Google", entity_type="organization", id="18") G.add_node("Microsoft", entity_type="organization", id="19") G.add_node("Amazon", entity_type="organization", id="20") # edges G.add_edge("Alice", "English", relation_type="Speaks") G.add_edge("Bob", "English", relation_type="Speaks") G.add_edge("Charlie", "Spanish", relation_type="Speaks") G.add_edge("David", "English", relation_type="Speaks") G.add_edge("Eve", "Spanish", relation_type="Speaks") G.add_edge("Alice", "United States", relation_type="LivesIn") G.add_edge("Bob", "United States", relation_type="LivesIn") G.add_edge("Charlie", "Mexico", relation_type="LivesIn") G.add_edge("David", "United States", relation_type="LivesIn") G.add_edge("Eve", "Mexico", relation_type="LivesIn") G.add_edge("Alice", "Software Engineer", relation_type="WorksAs") G.add_edge("Bob", "Software Engineer", relation_type="WorksAs") G.add_edge("Charlie", "Data Scientist", relation_type="WorksAs") G.add_edge("David", "Software Engineer", relation_type="WorksAs") G.add_edge("Eve", "Software Engineer", relation_type="WorksAs") G.add_edge("Alice", "Cooking", relation_type="Likes") G.add_edge("Bob", "Hiking", relation_type="Likes") # set node and edge colors based on entity and relation types node_colors = { "person": "lightblue", "language": "lightgreen", "country": "pink", "job": "pink", "hobby": "pink", "organization": "pink", } edge_colors = {"Speaks": "blue", "LivesIn": "red", "WorksAs": "green", "Likes": "black"} for n in G.nodes(): print(G.nodes[n]["entity_type"]) for e in G.edges.data(): print(e[2]["relation_type"]) node_color_list = [node_colors[G.nodes[n]["entity_type"]] for n in G.nodes()] edge_color_list = [edge_colors[e[2]["relation_type"]] for e in G.edges.data()] # draw the KG plt.figure(figsize=(16, 16)) pos = nx.spring_layout(G, seed=42) nx.draw_networkx_nodes(G, pos, node_color=node_color_list) nx.draw_networkx_labels(G, pos) nx.draw_networkx_edges(G, pos, edge_color=edge_color_list, arrows=True) # set legend labels and colors node_legend_labels = [ ("person", "lightblue"), ("language", "lightgreen"), ("country", "pink"), ("job", "pink"), ("hobby", "pink"), ("orgainzation", "pink"), ] edge_legend_labels = [ ("Speaks", "blue"), ("LivesIn", "red"), ("WorksAs", "green"), ("Likes", "black"), ] node_legend_colors = [color for _, color in node_legend_labels] edge_legend_colors = [color for _, color in edge_legend_labels] # draw the legend node_legend = plt.legend( node_legend_labels, loc="upper left", title="Node Types", frameon=True, facecolor="white", framealpha=1, ) plt.axis("off") plt.show() # # ONE HOP QUERY # import networkx as nx # # create the KG # G = nx.MultiDiGraph() # # G.add_node('entity1', type='type1') # # G.add_node('entity2', type='type2') # # G.add_edge('entity1', 'entity2', relation='relation1') # # execute a one-hop query # results = [] # for node, attr in G.nodes(data=True): # if attr['type'] == 'type1': # neighbors = G.neighbors(node) # for neighbor in neighbors: # if G[node][neighbor][0]['relation'] == 'relation1': # results.append((node, neighbor)) # # print the query results # print(results) # # PATH QUERY # import networkx as nx # # create the KG # G = nx.MultiDiGraph() # G.add_node('entity1', type='type1') # G.add_node('entity2', type='type2') # G.add_node('entity3', type='type3') # G.add_edge('entity1', 'entity2', relation='relation1') # G.add_edge('entity2', 'entity3', relation='relation2') # # execute a path query # 
results = [] # for start_node, start_attr in G.nodes(data=True): # if start_attr['type'] == 'type1': # for end_node, end_attr in G.nodes(data=True): # if end_attr['type'] == 'type3': # paths = nx.all_simple_paths(G, start_node, end_node, cutoff=2) # for path in paths: # if G[path[0]][path[1]][0]['relation'] == 'relation1' and G[path[1]][path[2]][0]['relation'] == 'relation2': # results.append(path) # # print the query results # print(results)
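# The evaluation cell above references y_test1 / y_pred1 (and y_test2 / y_pred2) that are
# never built, and the edge train/test split is created but never used. One way to produce
# them (a sketch, assuming the embeddings1 dict and the split from above): use the
# element-wise product of the endpoint embeddings as edge features, sample an equal number
# of non-edges as negatives, and fit a logistic-regression link predictor. The same
# construction with G2 / embeddings2 / train_edges2 / test_edges2 yields y_test2 and
# y_pred2.
import random
import numpy as np
from sklearn.linear_model import LogisticRegression

def edge_features(emb, edges):
    # Hadamard (element-wise) product of the two endpoint embeddings
    return np.array([emb[u] * emb[v] for u, v in edges])

def sample_non_edges(G, k, seed=42):
    # draw k node pairs that are not connected in G, to serve as negative examples
    rng = random.Random(seed)
    nodes = list(G.nodes())
    non_edges = set()
    while len(non_edges) < k:
        u, v = rng.sample(nodes, 2)
        if not G.has_edge(u, v):
            non_edges.add((u, v))
    return list(non_edges)

neg_train1 = sample_non_edges(G1, len(train_edges1))
neg_test1 = sample_non_edges(G1, len(test_edges1), seed=7)

X_tr = np.vstack([edge_features(embeddings1, train_edges1), edge_features(embeddings1, neg_train1)])
y_tr = np.concatenate([np.ones(len(train_edges1)), np.zeros(len(neg_train1))])
X_te = np.vstack([edge_features(embeddings1, test_edges1), edge_features(embeddings1, neg_test1)])
y_test1 = np.concatenate([np.ones(len(test_edges1)), np.zeros(len(neg_test1))])

clf1 = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
y_pred1 = clf1.predict_proba(X_te)[:, 1]   # probabilities, so roc_auc / average_precision apply directly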
# # Project Name :- Bike Rental # ### Project Description - # Objective of the analysis is to find out the determining factor that drives the demand on bike share rentals, # construct statistical models and then try to make prediction on rentals based on the information and models we have. # Exploration and the analysis of the data will be performed in R and Python. # ### Loading Libraries & Data import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import chi2_contingency import os import statistics from sklearn.metrics import r2_score from scipy import stats from sklearn.model_selection import train_test_split, RandomizedSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeRegressor from sklearn.metrics import mean_squared_error from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import LinearRegression from sklearn.ensemble import GradientBoostingRegressor from sklearn.metrics import classification_report from sklearn.metrics import roc_curve, auc, roc_auc_score sns.set_style("whitegrid") import warnings warnings.filterwarnings("ignore") # Setting working directory os.chdir("C:/Users/Click/Desktop/Bike rental") print(os.getcwd()) # Loading Dataset data = pd.read_csv("Bike_Rental.csv") data = pd.DataFrame(data) # Creating Duplicate instances of data for Preprocessing and exploration df = data.copy() # ### Exploring Data data.head(5) # Checking info of data -> data types and rows n cols data.info() # This shows that we have no Missing Values for any column. data.describe() # calculating number of unique values for all df columns data.nunique() data.columns ##We know that 'cnt' which is our target variable is sum of two other variables - 'registered' and 'casusal'. 
#'instant' variable is of no use and can be dropped #'dteday' variable is a date column which is not significant in our analysis and can be excluded # So we will drop these variables now itself drop1 = ["casual", "registered", "instant", "dteday"] data = data.drop(drop1, axis=1) # Variables are " Continuos" and "Categorical" con = ["temp", "atemp", "hum", "windspeed", "cnt"] cat = ["season", "yr", "mnth", "holiday", "weekday", "workingday", "weathersit"] # Target Variable probability data distribution plt.figure(figsize=(8, 6)) plt.hist(data["cnt"], normed=True, bins=30) plt.ylabel("Probability", fontsize=15) plt.xlabel("Number of Users", fontsize=15) plt.savefig("Count of Users.png") plt.title("Bike Rental Statistics", fontsize=20) # Function to view the categories present in each categorical feature and thier values def view_feature_cat(obj): for i in range(len(obj)): print("*******************************************") print("Feature:", obj[i]) print("-----------------------") print(data[str(obj[i])].value_counts()) print("*******************************************") view_feature_cat(cat) # ### Data Understanding sns.catplot(x="weekday", y="cnt", data=data) plt.savefig("days_bikecnt.png") sns.catplot(x="mnth", y="cnt", data=data) plt.savefig("mnth_bikecnt.png") sns.catplot(x="season", y="cnt", data=data) plt.savefig("season_bikecnt.png") sns.catplot(x="weathersit", y="cnt", data=data) plt.savefig("hol_bikecnt.png") # Checking the distribution of values for variables in data for i in con: if i == "cnt": continue sns.distplot(data[i], bins="auto") plt.title("Checking Distribution for Variable " + str(i)) plt.ylabel("Density") plt.savefig("{i}_Vs_Density.png".format(i=i)) plt.show() # ### OutLier Analysis """def box_plot(x): plt.boxplot(data[x]) plt.xlabel(x,fontsize= 15) plt.ylabel('Values',fontsize= 15) plt.xticks(fontsize=10, rotation=90) plt.yticks(fontsize=10) plt.title("Boxplot for {X}".format(X=x),fontsize = 20) plt.savefig("Boxplot for {X}.png".format(X=x)) plt.show() box_plot('windspeed') box_plot('temp') box_plot('atemp') box_plot('hum')""" box = plt.boxplot( [data["temp"], data["atemp"], data["hum"], data["windspeed"]], patch_artist=True ) plt.xlabel(["1. Temperature", "2. Feeling Temperature", "3. Humidity", "4. Windspeed"]) plt.title("BoxPlot of the Variables for Weather Conditions") colors = ["cyan", "lightblue", "lightgreen", "tan"] for patch, color in zip(box["boxes"], colors): patch.set_facecolor(color) plt.ylabel("Values") plt.savefig("BoxPlot of the Variables for Weather Conditions") box2 = plt.boxplot([data["cnt"]], patch_artist=True) plt.xlabel(["1. 
Total Count"]) plt.title("BoxPlot of the Variables for user count") colors = ["red"] for patch, color in zip(box2["boxes"], colors): patch.set_facecolor(color) plt.ylabel("Values") plt.savefig("BoxPlot of the Variables for user count") # From the above boxplot we can conclude that there are outliers windspeed variables # Getting 75 and 25 percentile of variable "windspeed" q75, q25 = np.percentile(data["windspeed"], [75, 25]) # Calculating Interquartile range iqr = q75 - q25 # Calculating upper extream and lower extream minimum = q25 - (iqr * 1.5) maximum = q75 + (iqr * 1.5) # Replacing all the outliers value to NA data.loc[data["windspeed"] < minimum, "windspeed"] = np.nan data.loc[data["windspeed"] > maximum, "windspeed"] = np.nan # Checking % of missing values data.isnull().sum().sum() # Checking missing values in train dataset print(data.isnull().sum()) # result shows there are missing values in the dataset ##we will impute the missing values which was outlier values by using mean imputation # we chose mean imputation because median imputation is majorly suitable for the data having outliers ## as we dont have outliers so we will choose mean imputation over KNN. data["windspeed"] = data["windspeed"].fillna(data["windspeed"].mean()) print(data.isnull().sum()) # ### Feature Selection # Code for plotting pairplot sns_plot = sns.pairplot(data=data[con]) plt.plot() plt.savefig("Pairplot") ##Correlation analysis for continuous variables # Correlation plot data_corr = data.loc[:, con] # Set the width and hieght of the plot f, ax = plt.subplots(figsize=(10, 10)) # Generate correlation matrix corr = data_corr.corr() # Plot using seaborn library sns.heatmap( corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 50, as_cmap=True), square=True, ax=ax, annot=True, ) plt.plot() plt.savefig("Heatmap") label = "cnt" obj_dtype = cat drop_feat = [] ## ANOVA TEST FOR P VALUES import statsmodels.api as sm from statsmodels.formula.api import ols anova_p = [] for i in obj_dtype: buf = label + " ~ " + i mod = ols(buf, data=data).fit() anova_op = sm.stats.anova_lm(mod, typ=2) print(anova_op) anova_p.append(anova_op.iloc[0:1, 3:4]) p = anova_op.loc[i, "PR(>F)"] if p >= 0.05: drop_feat.append(i) drop_feat # As a result of correlation analysis and ANOVA, we have concluded that we should remove 6 columns #'temp' and 'atemp' are correlated and hence one of them should be removed #'holiday', 'weekday' and 'workingday' have p>0.05 and hence should be removed # Droping the variables which has redundant information to_drop = ["atemp", "holiday", "weekday", "workingday"] data = data.drop(to_drop, axis=1) data.info() # Updating the Continuous and Categorical Variables after droping correlated variables con = [i for i in con if i not in to_drop] cat = [i for i in cat if i not in to_drop] # ### Feature Scaling # Checking the distribution of values for variables in data for i in con: if i == "data": continue sns.distplot(data[i], bins="auto") plt.title("Checking Distribution for Variable " + str(i)) plt.ylabel("Density") plt.savefig("{i}_Vs_Density.png".format(i=i)) plt.show() # Data before scaling data.head() # Since our data is normally distributed, we will use Standardization for Feature Scalling # #Standardization for i in con: if i == "cnt": continue data[i] = (data[i] - data[i].mean()) / (data[i].std()) # Data after scaling data.head() # #### Before going for modelling algorithms, we will create dummy variables for our categorical variables dummy_data = pd.get_dummies(data=data, columns=cat) 
# Copying dataframe bike_data = dummy_data.copy() dummy_data.head() # ### Machine Learning algorithms # Using train test split functionality for creating sampling X_train, X_test, y_train, y_test = train_test_split( dummy_data.iloc[:, dummy_data.columns != "cnt"], dummy_data.iloc[:, 3], test_size=0.33, random_state=101, ) (X_train.shape), (y_train.shape) # ### Decision Tree Regressor # Importing libraries for Decision Tree from sklearn.tree import DecisionTreeRegressor from sklearn.metrics import mean_squared_error # Building model on top of training dataset fit_DT = DecisionTreeRegressor(max_depth=2).fit(X_train, y_train) # Calculating RMSE for test data to check accuracy pred_test = fit_DT.predict(X_test) rmse_for_test = np.sqrt(mean_squared_error(y_test, pred_test)) def MAPE(y_true, y_pred): mape = np.mean(np.abs((y_true - y_pred) / y_true)) * 100 return mape DT_rmse = rmse_for_test DT_mape = MAPE(y_test, pred_test) DT_r2 = r2_score(y_test, pred_test) print("Decision Tree Regressor Model Performance:") print("Root Mean Squared Error For Test data = " + str(rmse_for_test)) print("R^2 Score(coefficient of determination) = " + str(r2_score(y_test, pred_test))) print("MAPE(Mean Absolute Percentage Error) = " + str(DT_mape)) # Decision Tree Regressor Model Performance: # Root Mean Squared Error For Test data = 997.3873927346699 # R^2 Score(coefficient of determination) = 0.7073525764693427 # MAPE(Mean Absolute Percentage Error) = 25.707144204754727 # ### Random Forest # Importing libraries for Random Forest from sklearn.ensemble import RandomForestRegressor # Building model on top of training dataset fit_RF = RandomForestRegressor(n_estimators=500).fit(X_train, y_train) # Calculating RMSE for test data to check accuracy pred_test = fit_RF.predict(X_test) rmse_for_test = np.sqrt(mean_squared_error(y_test, pred_test)) RF_rmse = rmse_for_test RF_mape = MAPE(y_test, pred_test) RF_r2 = r2_score(y_test, pred_test) print("Random Forest Regressor Model Performance:") print("Root Mean Squared Error For Test data = " + str(rmse_for_test)) print("R^2 Score(coefficient of determination) = " + str(r2_score(y_test, pred_test))) print("MAPE(Mean Absolute Percentage Error) = " + str(RF_mape)) # Random Forest Regressor Model Performance: # Root Mean Squared Error For Test data = 567.4712836267795 # R^2 Score(coefficient of determination) = 0.9052662486980746 # MAPE(Mean Absolute Percentage Error) = 13.33175245911665 # ### Linear Regression # Importing libraries for Linear Regression from sklearn.linear_model import LinearRegression # Building model on top of training dataset fit_LR = LinearRegression().fit(X_train, y_train) # Calculating RMSE for test data to check accuracy pred_test = fit_LR.predict(X_test) rmse_for_test = np.sqrt(mean_squared_error(y_test, pred_test)) LR_rmse = rmse_for_test LR_mape = MAPE(y_test, pred_test) LR_r2 = r2_score(y_test, pred_test) print("Linear Regression Model Performance:") print("Root Mean Squared Error For Test data = " + str(rmse_for_test)) print("R^2 Score(coefficient of determination) = " + str(r2_score(y_test, pred_test))) print("MAPE(Mean Absolute Percentage Error) = " + str(LR_mape)) # Linear Regression Model Performance: # Root Mean Squared Error For Test data = 736.2047259447531 # R^2 Score(coefficient of determination) = 0.8405538055300172 # MAPE(Mean Absolute Percentage Error) = 17.217590042129938 # ### Gradient Boosting Regressor # Importing library for Gradient Boosting from sklearn.ensemble import GradientBoostingRegressor # Building model on top of training 
dataset fit_GB = GradientBoostingRegressor().fit(X_train, y_train) # Calculating RMSE for test data to check accuracy pred_test = fit_GB.predict(X_test) rmse_for_test = np.sqrt(mean_squared_error(y_test, pred_test)) GBR_rmse = rmse_for_test GBR_mape = MAPE(y_test, pred_test) GBR_r2 = r2_score(y_test, pred_test) print("Gradient Boosting Regressor Model Performance:") print("Root Mean Squared Error For Test data = " + str(rmse_for_test)) print("R^2 Score(coefficient of determination) = " + str(r2_score(y_test, pred_test))) print("MAPE(Mean Absolute Percentage Error) = " + str(GBR_mape)) # Gradient Boosting Regressor Model Performance: # Root Mean Squared Error For Test data = 575.7689853723047 # R^2 Score(coefficient of determination) = 0.9024755542385117 # MAPE(Mean Absolute Percentage Error) = 13.039727726693526 # ### Final Results for all models dat = { "Model_name": [ "Decision tree default", "Random Forest Default", "Linear Regression", "Gradient Boosting Default", ], "RMSE": [DT_rmse, RF_rmse, LR_rmse, GBR_rmse], "MAPE": [DT_mape, RF_mape, LR_mape, GBR_mape], "R^2": [DT_r2, RF_r2, LR_r2, GBR_r2], } results = pd.DataFrame(data=dat) results # ### Random Forest CV # Importing essential libraries from sklearn.ensemble import RandomForestRegressor from sklearn.datasets import make_regression ##Random Search CV from sklearn.model_selection import RandomizedSearchCV RRF = RandomForestRegressor(random_state=0) n_estimator = list(range(1, 20, 2)) depth = list(range(1, 100, 2)) # Create the random grid rand_grid = {"n_estimators": n_estimator, "max_depth": depth} randomcv_rf = RandomizedSearchCV( RRF, param_distributions=rand_grid, n_iter=5, cv=5, random_state=0 ) randomcv_rf = randomcv_rf.fit(X_train, y_train) predictions_RRF = randomcv_rf.predict(X_test) predictions_RRF = np.array(predictions_RRF) view_best_params_RRF = randomcv_rf.best_params_ best_model = randomcv_rf.best_estimator_ predictions_RRF = best_model.predict(X_test) # R^2 RRF_r2 = r2_score(y_test, predictions_RRF) # Calculating MSE RRF_mse = np.mean((y_test - predictions_RRF) ** 2) # Calculate MAPE RRF_mape = MAPE(y_test, predictions_RRF) print("Random Search CV Random Forest Regressor Model Performance:") print("Best Parameters = ", view_best_params_RRF) print("R-squared = {:0.2}.".format(RRF_r2)) print("MSE = ", round(RRF_mse)) print("MAPE = {:0.4}%.".format(RRF_mape)) print("**********************************************") ### END OF CODE ###
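# Standalone sketch of the tune-then-score pattern used above, run on a small synthetic
# regression problem so it executes on its own; the parameter values are illustrative,
# not the ones chosen for the bike-sharing data. The same three numbers reported for the
# default models (RMSE, MAPE, R^2) could be computed for `best_model` above and appended
# to the `results` table for a like-for-like comparison.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import RandomizedSearchCV, train_test_split

X_demo, y_demo = make_regression(n_samples=300, n_features=6, noise=15.0, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.3, random_state=0)
search = RandomizedSearchCV(
    RandomForestRegressor(random_state=0),
    param_distributions={"n_estimators": [20, 50, 100], "max_depth": [3, 5, 9, None]},
    n_iter=5,
    cv=3,
    random_state=0,
)
search.fit(X_tr, y_tr)
pred_demo = search.best_estimator_.predict(X_te)
print(search.best_params_)
print("RMSE:", np.sqrt(mean_squared_error(y_te, pred_demo)))
print("R^2 :", r2_score(y_te, pred_demo))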
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# Any results you write to the current directory are saved as output.
path = "/kaggle/input/predicting-a-pulsar-star/pulsar_stars.csv"
stars = pd.read_csv(path)
print("size of our data : ", len(stars))
stars.head()
print("our data columns :\n", stars.columns)
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder


# Label-encode the selected columns, adding a "_cat" copy of each
def encod(cat_feat, df):
    encoder = LabelEncoder()
    for each in cat_feat:
        fea = each + "_cat"
        encoded = encoder.fit_transform(df[each])
        df[fea] = encoded


stars2 = stars.copy()
cat = stars2.columns[4:8]
stars2[cat] = stars2[cat].astype(int)
stars2.head()
encod(cat, stars2)
stars2.head()

# Hold out the last 10% of rows as test and the 10% before that as validation
fr = 0.1
vsize = int(len(stars) * fr)
train = stars2[: -2 * vsize]
valid = stars2[-2 * vsize : -vsize]
test = stars2[-vsize:]
for each in [train, valid, test]:
    print(f"Percentage of target values : {each.target_class.mean():.4f}")
import lightgbm as lgb


# Train a LightGBM binary classifier with early stopping on the validation set
def categ(feat_cols, train, valid, test):
    dtrain = lgb.Dataset(data=train[feat_cols], label=train["target_class"])
    dvalid = lgb.Dataset(data=valid[feat_cols], label=valid["target_class"])
    dtest = lgb.Dataset(data=test[feat_cols], label=test["target_class"])
    param = {"num_leaves": 64, "objective": "binary"}
    param["metric"] = "auc"
    num_round = 500
    bst = lgb.train(
        param, dtrain, num_round, valid_sets=[dvalid], early_stopping_rounds=10
    )
    return bst


feat_cols = stars2.columns[9:12]
res = categ(feat_cols, train, valid, test)
from sklearn import metrics

ypred = res.predict(test[feat_cols])
score = metrics.roc_auc_score(test["target_class"], ypred)
print(f"our score is: {score}")
# # Introduction to BDTs import pandas import numpy as np import matplotlib.pyplot as plt import json # Load data hd_Signal = pandas.read_hdf("../input/Signal.h5", "df") hd_Background = pandas.read_hdf("../input/Background.h5", "df") hd_Signal.head() # Select set of variables variablelist = [ "nJets_OR_T", "nJets_OR_T_MV2c10_70", "Mll01", "minDeltaR_LJ_0", "minDeltaR_LJ_1", "max_eta", "lep_Pt_1", "MET_RefFinal_et", "DRll01", ] # Plot distributions for two classes of events: Signal and Background fig, ax = plt.subplots(3, 4, figsize=(25, 15)) nbins = 50 varcounter = -1 for i, axobjlist in enumerate(ax): for j, axobj in enumerate(axobjlist): varcounter += 1 if varcounter < len(variablelist): var = variablelist[varcounter] p_Signal = pandas.DataFrame({var: hd_Signal[var]}) p_Background = pandas.DataFrame({var: hd_Background[var]}) # b.replace([np.inf, -np.inf], np.nan, inplace=True) # c.replace([np.inf, -np.inf], np.nan, inplace=True) # b = b.dropna() # c = c.dropna() minval = np.amin(p_Signal[var]) maxval = max([np.amax(p_Signal[var]), np.amax(p_Background[var])]) * 1.4 binning = np.linspace(minval, maxval, nbins) axobj.hist( p_Signal[var], binning, histtype="step", label="Signal", density=1 ) # color='orange', axobj.hist( p_Background[var], binning, histtype="step", label="Background", density=1, ) # color='b', axobj.legend() axobj.set_yscale("log", nonposy="clip") axobj.set_title(variablelist[varcounter]) else: axobj.axis("off") plt.tight_layout() plt.show() # Prepare dataset for BDT training Signal_vars = hd_Signal[variablelist] Background_vars = hd_Background[variablelist] X = np.concatenate((Signal_vars, Background_vars)) # training data y = np.concatenate( (np.ones(Signal_vars.shape[0]), np.zeros(Background_vars.shape[0])) ) # class lables from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve, auc # split data to train and test samples X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33) bdt_0 = AdaBoostClassifier( DecisionTreeClassifier( max_depth=4, max_features="auto", min_samples_split=10, min_samples_leaf=10 ), n_estimators=100, learning_rate=0.5, ) bdt_0.fit(X_train, y_train) y_predicted_0 = bdt_0.predict(X_test) print( classification_report(y_test, y_predicted_0, target_names=["signal", "background"]) ) print( "Area under ROC curve: %.4f" % (roc_auc_score(y_test, bdt_0.decision_function(X_test))) ) import xgboost # bdt_xgb = xgboost.XGBClassifier(tree_method="hist", thread_count=-1) bdt_xgb = xgboost.XGBClassifier( tree_method="hist", thread_count=-1, max_depth=3, learning_rate=0.1, n_estimators=1000, verbosity=1, objective="binary:logistic", booster="gbtree", n_jobs=1, gamma=0, min_child_weight=1, ) # , max_delta_step=0, subsample=1, colsample_bytree=1, colsample_bylevel=1, colsample_bynode=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, base_score=0.5, random_state=0, missing=None, gpu_id=-1, **kwargs) # agrees to the tth default: # Method_Opt = "!H:!V:NTrees=1000:MinNodeSize=1.5%:BoostType=Grad:Shrinkage=0.10: # UseBaggedBoost:BaggedSampleFraction=0.5:nCuts=20:MaxDepth=2"; bdt_xgb.fit(X_train, y_train) y_predicted_xgb = bdt_xgb.predict(X_test) print( classification_report( y_test, y_predicted_xgb, target_names=["signal", "background"] ) ) xgb_bdt_ROC = roc_auc_score(y_test, bdt_xgb.predict_proba(X_test)[:, 1]) print("XGBoost ROC AUC 
= {:.3f}".format(xgb_bdt_ROC)) print( "wrt BDT: %.4f" % (xgb_bdt_ROC / roc_auc_score(y_test, bdt_0.decision_function(X_test))) ) # ### Evaluate model time/ROC import time def evaluate_models(models_dict): for model_name, model in models_dict.items(): start = time.time() model.fit(X_train, y_train) end = time.time() print( "{}; train time {:.3f} s; ROC AUC = {:.3f}".format( model_name, end - start, roc_auc_score(y_test, model.predict_proba(X_test)[:, 1]), ) ) mods = {"AdaBoost": bdt_0, "XGboost": bdt_xgb} evaluate_models(mods) fpr = dict() tpr = dict() roc_auc = dict() i = 0 for model_name, model in mods.items(): # for i in range(len(mods.keys())): print(i) # fpr[i], tpr[i], _ = roc_curve(y_test, pred_vec[i]) fpr[i], tpr[i], _ = roc_curve(y_test, model.predict_proba(X_test)[:, 1]) # roc_auc_score(y_test, model.predict_proba(X_test)[:, 1]) roc_auc[i] = auc(fpr[i], tpr[i]) i += 1 # ### Compute ROC curve and ROC area for each class plt.figure() lw = 2 for i in range(len(mods)): plt.plot( fpr[i], tpr[i], lw=lw, label="%s ROC (%0.3f)" % (list(mods.keys())[i], roc_auc[i]), ) # color='darkorange', plt.plot([0, 1], [0, 1], color="navy", lw=lw, linestyle="--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver operating characteristic example") plt.legend(loc="lower right") plt.show() # ### Display the structure of the tree from xgboost import plot_tree plot_tree(bdt_xgb, rankdir="LR") # plot_tree(bst, num_trees=2),num_trees=1 fig = plt.gcf() fig.set_size_inches(150, 100) # ### Plot discriminant distribution for two classes plot_range = (0, 1) colors = ["orange", "blue"] class_names = ["Signal", "Background"] nbins = 40 for i in range(2): plt.hist( bdt_xgb.predict_proba(X_test)[:, i], nbins, range=plot_range, label="Test %s" % class_names[i], color=colors[i], alpha=0.5, density=True, ) plt.hist( bdt_xgb.predict_proba(X_train)[:, i], nbins, range=plot_range, label="Train %s" % class_names[i], color=colors[i], alpha=0.5, histtype="step", density=True, ) # x1, x2, y1, y2 = plt.axis() # plt.axis((x1, x2, y1, y2 * 1.2)) plt.legend(loc="upper right") plt.ylabel("Samples") plt.xlabel("Score") plt.title("Decision Scores") # ### Overfitting example bdt_xgb_ovf = xgboost.XGBClassifier( tree_method="hist", thread_count=-1, max_depth=20, learning_rate=0.1, n_estimators=100, verbosity=1, objective="binary:logistic", booster="gbtree", ) bdt_xgb_ovf.fit(X_train, y_train) y_predicted_xgb_ovf = bdt_xgb_ovf.predict(X_test) xgb_bdt_ovf_ROC = roc_auc_score(y_test, bdt_xgb_ovf.predict_proba(X_test)[:, 1]) print("XGBoost ROC AUC = {:.3f}".format(xgb_bdt_ovf_ROC)) plot_range = (0, 1) colors = ["orange", "blue"] class_names = ["Signal", "Background"] nbins = 40 for i in range(2): plt.hist( bdt_xgb_ovf.predict_proba(X_test)[:, i], nbins, range=plot_range, label="Test %s" % class_names[i], color=colors[i], alpha=0.5, density=True, ) plt.hist( bdt_xgb_ovf.predict_proba(X_train)[:, i], nbins, range=plot_range, label="Train %s" % class_names[i], color=colors[i], alpha=0.5, histtype="step", density=True, ) # x1, x2, y1, y2 = plt.axis() # plt.axis((x1, x2, y1, y2 * 1.2)) plt.legend(loc="upper right") plt.ylabel("Samples") plt.xlabel("Score") plt.title("Decision Scores") from scipy import stats from scipy.stats import ks_2samp KS_stat = ks_2samp( bdt_xgb.predict_proba(X_test)[:, 1], bdt_xgb.predict_proba(X_train)[:, 1] ) KS_stat_ovf = ks_2samp( bdt_xgb_ovf.predict_proba(X_test)[:, 1], bdt_xgb_ovf.predict_proba(X_train)[:, 1] ) print( 
"Kolmogorov-Smirnoff statistics for : \n - shallow tree - ", KS_stat, "\n - overfitting model", KS_stat_ovf, ) def empirical_cdf(sample, plotting=True): N = len(sample) rng = max(sample) - min(sample) if plotting: xs = np.concatenate( [ np.array([min(sample) - rng / 3]), np.sort(sample), np.array([max(sample) + rng / 3]), ] ) ys = np.append(np.arange(N + 1) / N, 1) else: xs = np.sort(sample) ys = np.arange(1, N + 1) / N return (xs, ys) xs_test, ys_test = empirical_cdf(bdt_xgb.predict_proba(X_test)[:, 1]) xs_train, ys_train = empirical_cdf(bdt_xgb.predict_proba(X_train)[:, 1]) xs_test_ovf, ys_test_ovf = empirical_cdf(bdt_xgb_ovf.predict_proba(X_test)[:, 1]) xs_train_ovf, ys_train_ovf = empirical_cdf(bdt_xgb_ovf.predict_proba(X_train)[:, 1]) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 6)) # fig.suptitle('Horizontally stacked subplots') ax1.set_title("Normal model") ax1.plot(xs_test, ys_test, label="Test Signal", linewidth=3, linestyle=":") ax1.plot(xs_train, ys_train, label="Train Signal") ax1.set_ylabel("c. d. f.") ax1.set_xlabel("Score") ax1.legend() ax2.set_title("Overfitting model") ax2.set_ylabel("c. d. f.") ax2.plot(xs_test_ovf, ys_test_ovf, label="Test Signal") ax2.plot(xs_train_ovf, ys_train_ovf, label="Train Signal") ax2.set_xlabel("Score") ax2.legend() # plt.step(xs_test, ys_test) ##plt.step(xs_train, ys_train) # plt.step(xs_test_ovf, ys_test_ovf) # plt.step(xs_train_ovf, ys_train_ovf) plot_step = 0.2 x_min, x_max = X[:, 4].min(), X[:, 4].max() y_min, y_max = X[:, 6].min(), X[:, 6].max() xx, yy = np.meshgrid( np.arange(x_min, x_max, plot_step * 2), np.arange(y_min, y_max, plot_step * 2) ) # Plot the decision boundaries plt.subplot(121) plt.axis("tight") for i in range(2): idx = np.where(y == i) plt.scatter( X[idx, 4], X[idx, 6], # c=c, cmap=plt.cm.Paired, s=20, edgecolor="k", label="Class %s" % i, ) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.legend(loc="upper right") plt.xlabel("x") plt.ylabel("y") plt.title("Decision Boundary")
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) from sklearn import preprocessing # Any results you write to the current directory are saved as output. df = pd.read_csv( "/kaggle/input/sloan-digital-sky-survey/Skyserver_SQL2_27_2018 6_51_39 PM.csv" ) df.head(10) df.columns labels = df["class"] df.drop("class", inplace=True, axis=1) print(labels.unique()) listt = df.columns dict = [] for i in listt: dict.append((len(df[i].unique()) / df.shape[0], i)) dict = sorted(dict) for i, j in dict: print(i, j) # drop first 2 columns df.drop(["objid", "rerun"], inplace=True, axis=1) dict2 = [] for i in df.columns: dict2.append((df[i].isnull().sum(), i)) dict2 = sorted(dict2, reverse=False) for i, j in dict2: print(i, j) df.describe() from sklearn.preprocessing import StandardScaler ss = StandardScaler() df = ss.fit_transform(df) df1 = pd.DataFrame(df) le = preprocessing.LabelEncoder() labels = le.fit_transform(labels) print(labels) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(df1, labels, test_size=0.33) print(X_train.shape) print(X_test.shape) print(y_train.shape) print(y_test.shape) from sklearn import svm clf = svm.SVC(kernel="rbf") clf.fit(X_train, y_train) print(clf.score(X_test, y_test)) from sklearn.metrics import f1_score y_pred = clf.predict(X_test) print(f1_score(y_test, y_pred, average="macro")) print(f1_score(y_test, y_pred, average="micro")) print(f1_score(y_test, y_pred, average="weighted")) from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X_train, y_train) print(clf.score(X_test, y_test)) from sklearn.metrics import f1_score y_pred = clf.predict(X_test) print(f1_score(y_test, y_pred, average="macro")) print(f1_score(y_test, y_pred, average="micro")) print(f1_score(y_test, y_pred, average="weighted")) label_df = pd.DataFrame(y_train) zeros = label_df[label_df == 0].count() ones = label_df[label_df == 1].count() two = label_df[label_df == 2].count() import matplotlib.pyplot as plt plt.rcdefaults() import numpy as np import matplotlib.pyplot as plt objects = ("0", "1", "2") y_pos = np.arange(len(objects)) performance = [int(zeros), int(ones), int(two)] plt.bar(y_pos, performance, align="center", alpha=0.5) plt.xticks(y_pos, objects) plt.ylabel("Usage") plt.title("Programming language usage") plt.show() from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier() clf.fit(X_train, y_train) from sklearn.metrics import f1_score print(clf.score(X_test, y_test)) y_pred = clf.predict(X_test) print(f1_score(y_test, y_pred, average="macro")) print(f1_score(y_test, y_pred, average="micro")) print(f1_score(y_test, y_pred, average="weighted"))
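# The averaged F1 scores printed above compress the three classes (GALAXY, QSO, STAR)
# into single numbers; a confusion matrix and per-class report show where the mistakes
# actually land. Tiny standalone sketch of the calls that could equally be applied to
# the notebook's y_test / y_pred:
from sklearn.metrics import classification_report, confusion_matrix

y_true_demo = [0, 0, 1, 1, 1, 2, 2]
y_pred_demo = [0, 1, 1, 1, 2, 2, 2]
print(confusion_matrix(y_true_demo, y_pred_demo))
print(classification_report(y_true_demo, y_pred_demo))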
# #Bellabeat by C
# author: "Chinwe O."
# date: "2023-04-10"
# Bellabeat Case Study in R
# ##About Bellabeat
# Founded in 2013 by Urška Sršen and Sando Mur, Bellabeat is a wellness technology company producing health-focused products targeted at women. With a range of products including the Bellabeat app, the Leaf and Time wearable trackers, as well as the Spring water bottle, they are a successful small company with the potential to become a more significant player in the global smart device market.
# Their sleek, well-crafted products inspire women all around the world. Helping them collect data on fitness, sleep and reproductive health, Bellabeat empowers women with the information to take charge of their well-being.
# Global expansion in 2016 saw the company grow its presence in many countries and online as well, partnering with several online retailers in addition to its own company website. They also invest in advertising, both traditional and digital, to create impact, especially around important marketing dates.
# Sršen believes that an analysis of consumer data will uncover more opportunities for growth. She has asked for an analysis of data on one Bellabeat product to gain insight into how people already use their devices.
# ##The Ask
# 1. What are some trends in smart device usage?
# 2. How could these trends apply to Bellabeat customers?
# 3. How could these trends help influence Bellabeat marketing strategy?
# ##The Business Task
# To analyze smart device usage data in order to gain insight into how consumers use non-Bellabeat smart
# devices.
# Select one Bellabeat product to apply these insights to my presentation.
# ##The Key Stakeholders
# 1. Urška Sršen: cofounder and Chief Creative Officer.
# 2. Sando Mur: cofounder and Bellabeat executive team member.
# 3. Bellabeat marketing analytics team.
# ##The Prepare Phase
# I am using 18 CSV files from FitBit Fitness Tracker Data by Möbius, available on Kaggle. This dataset contains personal fitness tracker data from thirty Fitbit users who consented to the submission of their personal tracker data.
# Loading my packages
library(tidyverse)
library(lubridate)
library(ggplot2)
library(tidyr)
library(dplyr)
library(knitr)
library(readr)
# Upload files from FitBit Fitness Tracker Data by Möbius
# (only weightLog was read in explicitly in the original; the daily* data frames used
# below are assumed to come from the matching files of the same Fitabase export)
dailyactivity <- read.csv("../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv")
dailycalories <- read.csv("../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv")
dailyintensities <- read.csv("../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv")
dailysteps <- read.csv("../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv")
dailysleep <- read.csv("../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv")
weightLog <- read.csv(
    "../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv"
)
# Viewed all uploaded data sets to establish variables to best use in creating our findings
# Clean data
# First by counting number of user id
#
count(distinct(dailyactivity, Id))
count(distinct(dailycalories, Id))
count(distinct(weightLog, Id))
count(distinct(dailysteps, Id))
count(distinct(dailysleep, Id))
count(distinct(dailyintensities, Id))
# All dataframes have 33 users except for dailysleep (24) and weightLog (8). We will exclude the weightLog data as the sample size is too small.
# Limited dailysleep data will be cited in the final analysis
# Now check for any duplicated data
#
anyDuplicated(dailyactivity)
anyDuplicated(dailycalories)
anyDuplicated(dailysteps)
anyDuplicated(dailysleep)
anyDuplicated(dailyintensities)
# Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) from datetime import datetime import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import folium from folium.plugins import HeatMap print("Last updated: ", datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC")) wc = pd.read_csv("../input/world-coordinates/world_coordinates.csv") df = pd.read_csv("../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv") df # Drop duplicate entries, if any df.drop_duplicates(inplace=True) df # Remove columns not required for study df.drop(["Sno", "Last Update"], axis=1, inplace=True) df.head() # List of affected provinces/states aff_ps = df["Province/State"].unique() print(aff_ps) print("Total:", len(aff_ps)) # Number of cases in each Province/State case_ps = df.groupby("Province/State", as_index=False)[ ["Confirmed", "Deaths", "Recovered"] ].max() with pd.option_context( "display.max_rows", None, "display.max_columns", None ): # Prevent truncation display(case_ps) # Maintain rich formatting by using display() instead of print() # List of affected countries aff_c = df.Country.unique() print(aff_c) print("Total:", len(aff_c)) # Replacing Mainland China with China to make dataset consistent df["Country"].replace({"Mainland China": "China"}, inplace=True) # Number of cases in each Country case_c = df.groupby(["Country", "Date"]).sum().reset_index() case_c = case_c.sort_values("Date", ascending=False) case_c = case_c.drop_duplicates(subset=["Country"]) with pd.option_context("display.max_rows", None, "display.max_columns", None): display( case_c.sort_values("Country")[ ["Country", "Confirmed", "Deaths", "Recovered"] ].reset_index(drop=True) ) # Total number of cases print("Total Confirmed:", case_c["Confirmed"].sum()) print("Total Deaths:", case_c["Deaths"].sum()) print("Total Recovered:", case_c["Recovered"].sum()) # Plot number of cases in different countries plt.rcParams["figure.figsize"] = (16, 8) sns.barplot(x="Country", y="Confirmed", data=case_c) plt.xticks(rotation=90) plt.xlabel("Affected countries", fontsize=15) plt.ylabel("Number of cases", fontsize=15) # Number of cases in different provinces in China case_ps.rename(columns={"Province/State": "Province"}, inplace=True) sns.barplot(x="Province", y="Confirmed", data=case_ps) plt.xticks(rotation=90) plt.xlabel("Affected provinces", fontsize=15) plt.ylabel("Number of cases", fontsize=15) plt.rcParams["figure.figsize"] = (16, 8) # Number of cases in Provinces other than Hubei sns.barplot(x="Province", y="Confirmed", data=case_ps[case_ps.Province != "Hubei"]) plt.xticks(rotation=90) plt.xlabel("Other affected provinces", fontsize=15) plt.ylabel("Number of cases", fontsize=15) plt.rcParams["figure.figsize"] = (16, 8) # Number of cases in countries other than China sns.barplot( x="Country", y="Confirmed", data=case_c[case_c.Country != "China"][case_c.Country != "Others"], ) plt.xticks(rotation=90) plt.xlabel("Other affected countries", fontsize=15) plt.ylabel("Number of cases", fontsize=15) plt.rcParams["figure.figsize"] = (16, 8) # Time-series analysis df_date = df.groupby("Date", as_index=False)[["Confirmed", "Deaths", "Recovered"]].sum() df_date # If Timestamp is required, run the following code # df['Timestamp'] = pd.to_datetime(df['Date']).astype(int)/10**10 
df_date["Date"] = pd.to_datetime( df_date["Date"] ).dt.date # Converting date-time to date df_date # Plot the cases plt.subplot(1, 2, 1) plt.plot( "Date", "Confirmed", data=df_date.groupby(["Date"]).sum().reset_index(), color="blue", ) plt.xticks(rotation=60) plt.xlabel("Dates", fontsize=12) plt.ylabel("Number of cases", fontsize=12) plt.legend() plt.subplot(1, 2, 2) plt.plot( "Date", "Deaths", data=df_date.groupby(["Date"]).sum().reset_index(), color="red" ) plt.plot( "Date", "Recovered", data=df_date.groupby(["Date"]).sum().reset_index(), color="green", ) plt.xticks(rotation=60) plt.xlabel("Dates", fontsize=12) plt.ylabel("Number of cases", fontsize=12) plt.legend() plt.rcParams["figure.figsize"] = (18, 8) plt.show() # Merge world coordinates with nCoV dataframe wc_df = pd.merge(wc, case_c, on="Country") wc_df.drop(["Code", "Date", "Deaths", "Recovered"], axis=1, inplace=True) wc_df # Folium Heatmap heatmap = folium.Map(location=[35.861660, 104.195397], zoom_start=3) heat_data = [ [row["latitude"], row["longitude"], row["Confirmed"]] for index, row in wc_df.iterrows() ] # Plot it on the map HeatMap(heat_data).add_to(heatmap) # Display the map heatmap
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from keras.models import Sequential from keras.layers import Dense from keras.utils import np_utils from keras.layers.core import Dropout from keras.layers.core import Flatten from keras.layers.normalization import BatchNormalization from sklearn.utils import class_weight df2 = pd.read_csv("/kaggle/input/bitsf312-lab1/train.csv", sep=",") # size = {"Small" : 1, "Medium" : 2, "Big": 3, "?" : np.nan} # df2.Size = [size[item] for item in df2.Size] for col in df2: for x in range(len(df2[col])): if df2[col][x] == "?": df2[col][x] = np.nan df2["Size"].value_counts() df2.head() df2["Size"].fillna("Medium", inplace=True) df2.head() df2.dtypes df2["Class"].value_counts() df2 = pd.concat([df2, pd.get_dummies(df2["Size"], prefix="size")], axis=1) df2.drop(["Size"], axis=1, inplace=True) df2.head() for col in df2: df2[col] = pd.to_numeric(df2[col]) df2.dtypes for col in df2: df2[col].fillna((df2[col].mean()), inplace=True) plt.figure(figsize=(20, 20)) corr = df2.corr() corr.style.background_gradient(cmap="RdYlGn") dfY = df2["Class"] cols = [0, 2, 4, 5, 11] dftest = df2.drop(df2.columns[cols], axis=1) plt.figure(figsize=(20, 20)) corr = dftest.corr() corr.style.background_gradient(cmap="RdYlGn") dftest dftest = dftest.values y = dfY.values dftest.shape, y.shape from sklearn import preprocessing X = dftest min_max_scaler = preprocessing.MinMaxScaler() X_scale = min_max_scaler.fit_transform(X) X.shape from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X_scale, y, test_size=0.2) dummy_ytrain = np_utils.to_categorical(Y_train) dummy_ytest = np_utils.to_categorical(Y_test) dummy_ytrain model = Sequential() model.add(Dense(10, input_dim=10, activation="relu")) model.add(Dropout(rate=0.25)) model.add(Dense(32, activation="relu")) model.add(Dropout(rate=0.25)) model.add(Dense(32, activation="relu")) # model.add(Dropout(rate=0.2)) # model.add(Dense(64, activation='relu')) model.add(Dropout(rate=0.25)) model.add(Dense(6, activation="softmax")) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(X_train, dummy_ytrain, epochs=250, validation_split=0.2, batch_size=25) model.evaluate(X_test, dummy_ytest) df1 = pd.read_csv("/kaggle/input/bitsf312-lab1/test.csv", sep=",") df_submit = pd.DataFrame() df_submit["ID"] = df1["ID"] df1 = pd.concat([df1, pd.get_dummies(df1["Size"], prefix="size")], axis=1) df1.drop(["Size"], axis=1, inplace=True) df1.head() cols = [0, 2, 4, 5] df1 = df1.drop(df1.columns[cols], axis=1) df1.head() dtest = df1.values Xtest = dtest min_max_scaler = preprocessing.MinMaxScaler() Xtest_scale = min_max_scaler.fit_transform(Xtest) Xtest.shape y_submit = model.predict_classes(Xtest, batch_size=40) y_submit df_submit["Class"] = y_submit df_submit.to_csv("Test1.csv", index=False)
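# Two caveats worth noting about the scaling above: a fresh MinMaxScaler is fit on the
# test file, so the test features are rescaled with their own min/max rather than the
# training set's, and the unscaled Xtest (not Xtest_scale) is what is actually passed to
# predict_classes. A common alternative, shown as a minimal standalone sketch rather
# than a claim about the original submission, is to fit once on the training data and
# only transform the test data:
import numpy as np
from sklearn.preprocessing import MinMaxScaler

train_demo = np.array([[1.0], [5.0], [9.0]])
test_demo = np.array([[3.0], [11.0]])
scaler = MinMaxScaler()
train_scaled = scaler.fit_transform(train_demo)  # fit on training data only
test_scaled = scaler.transform(test_demo)  # reuse the same min/max for the test data
print(train_scaled.ravel(), test_scaled.ravel())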
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt # data Visualization import seaborn as sns # data Visualization # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # Importing Datasets df_train = pd.read_csv("/kaggle/input/cat-in-the-dat-ii/train.csv") df_test = pd.read_csv("/kaggle/input/cat-in-the-dat-ii/test.csv") df_sub = pd.read_csv("/kaggle/input/cat-in-the-dat-ii/sample_submission.csv") sns.countplot(x=df_train["target"], data=df_train, palette="seismic") plt.title("TARGET DISTRIBUTION", fontsize=20) plt.xlabel("Target Values", fontsize=15) plt.ylabel("Count", fontsize=15) plt.show() df_train.sort_index(inplace=True) df_train.head() y_train = df_train["target"] test_id = df_test["id"] df_train.drop(["target", "id"], axis=1, inplace=True) df_test.drop("id", axis=1, inplace=True) cat_feat_to_encode = df_train.columns.tolist() smoothing = 0.20 import category_encoders as ce oof = pd.DataFrame([]) from sklearn.model_selection import StratifiedKFold for tr_idx, oof_idx in StratifiedKFold( n_splits=5, random_state=1032, shuffle=True ).split(df_train, y_train): ce_target_encoder = ce.TargetEncoder(cols=cat_feat_to_encode, smoothing=smoothing) ce_target_encoder.fit(df_train.iloc[tr_idx, :], y_train.iloc[tr_idx]) oof = oof.append( ce_target_encoder.transform(df_train.iloc[oof_idx, :]), ignore_index=False ) ce_target_encoder = ce.TargetEncoder(cols=cat_feat_to_encode, smoothing=smoothing) ce_target_encoder.fit(df_train, y_train) df_train = oof.sort_index() df_test = ce_target_encoder.transform(df_test) x_train = df_train.iloc[:, :].values x_test = df_test.iloc[:, :].values # Importing the Libraries for ANN import keras from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout # Initilasing the ANN classifier = Sequential() # Now lets creat Neural Networks classifier.add( Dense(units=256, kernel_initializer="uniform", input_dim=23, activation="relu") ) classifier.add(Dense(units=128, kernel_initializer="uniform", activation="relu")) classifier.add(Dense(units=64, kernel_initializer="uniform", activation="relu")) classifier.add(Dense(units=32, kernel_initializer="uniform", activation="relu")) classifier.add(Dense(units=1, kernel_initializer="uniform", activation="sigmoid")) # Compile the ANN from keras.optimizers import adam adam = adam(lr=0.001) classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]) # Fitting ANN to the Traning set classifier.fit(x_train, y_train, batch_size=1000, epochs=100) # Predicting the Test set result y_pred = classifier.predict_proba(x_test)[:, 0] # Sumbmission the result df_sub = pd.DataFrame() df_sub["id"] = test_id df_sub["target"] = y_pred df_sub.to_csv("submission.csv", index=False) df_sub.head(20)
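# Small standalone illustration of what the TargetEncoder above produces: each category
# is replaced by a smoothed mean of the target for that category. This is also why the
# notebook encodes each fold with an encoder fit on the other folds, so a row's own
# target never leaks into its encoded value. The toy column here is made up.
import pandas as pd
import category_encoders as ce

toy = pd.DataFrame({"color": ["red", "red", "blue", "blue", "blue", "green"]})
toy_target = pd.Series([1, 0, 1, 1, 0, 1])
enc = ce.TargetEncoder(cols=["color"], smoothing=0.20)
print(enc.fit_transform(toy, toy_target))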
# # PitchFork data wrangling and visualisation # Table of Contents # PitchFork data wrangling and visualisation1. Explore data2. Do review scores for an individual artist improve over time, or go down?3. Is the average of score of an artist correlated with the number of reviews? import sqlite3, datetime import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from matplotlib.ticker import MaxNLocator # For Interactive control import ipywidgets as widgets from ipywidgets import interact, interact_manual import cufflinks as cf # For Regression from sklearn.linear_model import LinearRegression from sklearn.preprocessing import PolynomialFeatures # Import all data with sqlite3.connect("../input/pitchfork-data/database.sqlite") as conn: artists = pd.read_sql("SELECT * FROM artists", conn) content = pd.read_sql("SELECT * FROM content", conn) genres = pd.read_sql("SELECT * FROM genres", conn) labels = pd.read_sql("SELECT * FROM labels", conn) reviews = pd.read_sql("SELECT * FROM reviews", conn) years = pd.read_sql("SELECT * FROM years", conn) # # For presentation, hide code # from IPython.display import HTML # HTML('''<script> # code_show=true; # function code_toggle() { # if (code_show){ # $('div.input').hide(); # } else { # $('div.input').show(); # } # code_show = !code_show # } # $( document ).ready(code_toggle); # </script> # <form action="javascript:code_toggle()"><input type="submit" value="Hide/Show raw code."></form>''') # # 1. Explore data df_list = [artists, content, genres, labels, reviews, years] for df in df_list: display(df.head()) display(df.info()) # Check if there are duplicates reviewid: Yes for df in df_list: print(df["reviewid"].nunique()) # Create datetime column for YYYY-mm reviews["date"] = pd.to_datetime(reviews["pub_date"]) reviews["year_month"] = reviews["date"].dt.strftime("%Y-%m") # Number of reviews over year df_year = reviews.groupby("pub_year")["reviewid"].nunique() fig, ax = plt.subplots(1, 1, figsize=(12, 6)) df_year.plot.bar() # Histogram of scores fig, ax = plt.subplots(1, 1, figsize=(12, 6)) reviews["score"].hist(ax=ax, bins=50, edgecolor="white", grid=False) reviews["score"].describe() # # 2. Do review scores for an individual artist improve over time, or go down? 
df2 = reviews.groupby(["artist", "year_month"], as_index=False).agg( {"score": np.mean, "reviewid": "nunique"} ) @interact def ind_artist(artist=df2["artist"].unique()): df2_artist = df2[df2["artist"] == artist] df2_artist = df2_artist.set_index("year_month") fig, ax1 = plt.subplots(1, 1, figsize=(12, 6)) # Plot monthly average score df2_artist["score"].plot.bar(ax=ax1, rot=0, color="silver") ax1.set_xlabel("") ax1.set_ylabel("Average Score") ax1.set_title("Average Review Score of Individual Artist Across Time") # Add bar values for p in ax1.patches: ax1.annotate( "{:,.1f}".format(np.round(p.get_height(), decimals=4)), (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", xytext=(0, 10), textcoords="offset points", ) # Remove the frame for spine in ax1.spines.values(): spine.set_visible(False) # Plot monthly review numbers ax2 = ax1.twinx() ax2.plot( df2_artist["reviewid"], color="darkred", linewidth=2, marker="o", markersize=7, markeredgecolor="w", ) ax2.set_ylabel("Number of reviews", color="darkred") # Remove the frame for spine in ax2.spines.values(): spine.set_visible(False) # Force y-axis ticks integer max_review = df2_artist["reviewid"].max() ax2.set_ylim([0, max_review + 1]) ax2.yaxis.set_major_locator(MaxNLocator(integer=True)) # # 3. Is the average of score of an artist correlated with the number of reviews? df3 = reviews.groupby(["artist"], as_index=False).agg( {"score": np.mean, "reviewid": "nunique"} ) df3 = df3.rename(columns={"reviewid": "reviews"}) df3.head() # Remove outliers df3 = df3[df3["reviews"] <= df3["reviews"].quantile(0.99)] fig, ax = plt.subplots(1, 1, figsize=(12, 6)) ax = sns.violinplot(x=df3["reviews"], y=df3["score"]) # Create a bucket of 0.5 points for average scores df3["score_g"] = df3["score"].apply(lambda x: np.ceil(x / 0.5) * 0.5) df3.head() # Plot scatter plot average score for an artist and number of reviews fig, ax = plt.subplots(1, 1, figsize=(8, 6)) ax.scatter(x=df3["reviews"], y=df3["score_g"], s=10, color="b", alpha=0.2) ax.set_xlabel("Number of reviews for an artist") ax.set_ylabel("Average score") ax.set_title("Correlation between average score and number of reviews for an artist") print("Correlation between average score and number of reviews for an artist: ") print(df3[["reviews", "score"]].corr()) # Fit square regression model x = df3["reviews"].values.reshape(-1, 1) y = df3["score_g"].values.reshape(-1, 1) model = PolynomialFeatures(degree=1) x_model = model.fit_transform(x) model.fit(x_model, y) model1 = LinearRegression() model1.fit(x_model, y) # Plotting x_range = np.arange(df3["reviews"].min(), df3["reviews"].max() + 1, 1).reshape(-1, 1) ax.plot( x_range, model1.predict(model.fit_transform(x_range)), color="r", linewidth=1, zorder=1, label="ZFS", )
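# The cell above is labelled "Fit square regression model" but builds PolynomialFeatures
# with degree=1, which reduces to a straight line. A genuinely quadratic fit only needs
# degree=2; minimal standalone sketch on synthetic data (not the reviews data):
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

rng = np.random.default_rng(0)
x_demo = np.sort(rng.uniform(1, 20, 80)).reshape(-1, 1)
y_demo = 7.0 + 0.10 * x_demo - 0.004 * x_demo**2 + rng.normal(0, 0.2, x_demo.shape)
poly = PolynomialFeatures(degree=2)
quad = LinearRegression().fit(poly.fit_transform(x_demo), y_demo)
print(quad.coef_, quad.intercept_)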
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.express as px import plotly.graph_objects as go from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot init_notebook_mode(connected=True) df = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv") df["Country"] = df["Country"].replace("Mainland China", "China") df[df["Country"] == "China"] df = df.drop(["Sno", "Date"], axis=1) df["Last Update"] = pd.to_datetime(df["Last Update"]) df[["Province/State", "Country"]] = df[["Province/State", "Country"]].fillna( "Unavailable" ) df[["Confirmed", "Deaths", "Recovered"]] = df[ ["Confirmed", "Deaths", "Recovered"] ].fillna(0.0) df.head(5) # df[df['Country'] == 'China'] df.shape latest_data_df = ( df.groupby(["Country", "Province/State"])[ "Last Update", "Confirmed", "Deaths", "Recovered" ] .max() .reset_index() ) latest_data_df = latest_data_df[ ["Country", "Province/State", "Confirmed", "Recovered", "Deaths", "Last Update"] ] latest_data_df.shape latest_data_df.head(50) # df[df['Country'] == 'Australia'].tail(5) latest_data_df.head(5) china_df = latest_data_df[latest_data_df["Country"] == "China"].reset_index(drop=True) china_df grouped_cnf_df = ( latest_data_df.groupby(["Country"])["Confirmed", "Recovered", "Deaths"] .sum() .reset_index() ) grouped_cnf_df.head(5) grouped_cnf_df = grouped_cnf_df[ (grouped_cnf_df["Country"] != "China") & (grouped_cnf_df["Country"] != "Others") ] grouped_cnf_df import plotly.express as px init_notebook_mode(connected=True) # do not miss this line fig = px.bar( grouped_cnf_df, x="Confirmed", y="Country", orientation="h", color="Confirmed", height=600, ) fig.update_layout(yaxis={"categoryorder": "total ascending"}) plotly.offline.iplot(fig) import plotly.graph_objects as go fig = go.Figure( go.Bar( x=grouped_cnf_df["Confirmed"], y=grouped_cnf_df["Country"], name="Confirmed", orientation="h", ) ) fig.add_trace( go.Bar( x=grouped_cnf_df["Deaths"], y=grouped_cnf_df["Country"], name="Deaths", orientation="h", ) ) fig.add_trace( go.Bar( x=grouped_cnf_df["Recovered"], y=grouped_cnf_df["Country"], name="Recovered", orientation="h", ) ) fig.update_layout( barmode="stack", yaxis={"categoryorder": "total ascending"}, height=1000 ) fig.show() # China based visualizations china_df.head(5) fig = go.Figure( go.Bar( x=china_df["Confirmed"], y=china_df["Province/State"], name="Confirmed", orientation="h", ) ) fig.add_trace( go.Bar( x=china_df["Deaths"], y=china_df["Province/State"], name="Deaths", orientation="h", ) ) fig.add_trace( go.Bar( x=china_df["Recovered"], y=china_df["Province/State"], name="Recovered", orientation="h", ) ) fig.update_layout( barmode="stack", yaxis={"categoryorder": "total ascending"}, height=1000 ) fig.show() fig = go.Figure( go.Bar( x=china_df[china_df["Province/State"] != "Hubei"]["Confirmed"], y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"], name="Confirmed", orientation="h", ) ) fig.add_trace( go.Bar( x=china_df[china_df["Province/State"] != "Hubei"]["Deaths"], y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"], name="Deaths", orientation="h", ) ) fig.add_trace( go.Bar( x=china_df[china_df["Province/State"] != "Hubei"]["Recovered"], y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"], name="Recovered", orientation="h", ) ) fig.update_layout( barmode="stack", yaxis={"categoryorder": "total ascending"}, height=1000 ) fig.show() from plotly.subplots import make_subplots fig = 
make_subplots( rows=2, cols=1, start_cell="bottom-left", row_heights=[0.96, 0.04], vertical_spacing=0.09, ) fig.add_trace( go.Bar( x=china_df[china_df["Province/State"] != "Hubei"]["Confirmed"], y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"], name="Confirmed", orientation="h", ), row=1, col=1, ) fig.add_trace( go.Bar( x=china_df[china_df["Province/State"] != "Hubei"]["Deaths"], y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"], name="Deaths", orientation="h", ), row=1, col=1, ) fig.add_trace( go.Bar( x=china_df[china_df["Province/State"] != "Hubei"]["Recovered"], y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"], name="Recovered", orientation="h", ), row=1, col=1, ) fig.add_trace( go.Bar( x=china_df[china_df["Province/State"] == "Hubei"]["Confirmed"], y=china_df[china_df["Province/State"] == "Hubei"]["Province/State"], name="Confirmed", orientation="h", ), row=2, col=1, ) fig.add_trace( go.Bar( x=china_df[china_df["Province/State"] == "Hubei"]["Deaths"], y=china_df[china_df["Province/State"] == "Hubei"]["Province/State"], name="Deaths", orientation="h", ), row=2, col=1, ) fig.add_trace( go.Bar( x=china_df[china_df["Province/State"] == "Hubei"]["Recovered"], y=china_df[china_df["Province/State"] == "Hubei"]["Province/State"], name="Recovered", orientation="h", ), row=2, col=1, ) fig.update_layout( showlegend=False, barmode="stack", yaxis={"categoryorder": "total ascending"}, height=700, ) fig.show() import plotly.express as px data = px.data.gapminder() data_canada = data[data.country == "Canada"] fig = px.bar( data_canada, x="year", y="pop", hover_data=["lifeExp", "gdpPercap"], color="lifeExp", labels={"pop": "population of Canada"}, height=400, ) fig.show()
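# The stacked horizontal bars above are assembled trace by trace with graph_objects;
# plotly express can produce the same kind of chart from a long-format frame in one
# call. Standalone sketch on a bundled sample dataset (tips), purely to show the
# pattern, not a replacement for the province-level figures:
import plotly.express as px

tips = px.data.tips()
fig_demo = px.bar(tips, x="total_bill", y="day", color="sex", orientation="h")
fig_demo.update_layout(barmode="stack", yaxis={"categoryorder": "total ascending"})
fig_demo.show()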
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import numpy as np import seaborn as sns import pandas as pd import matplotlib.pyplot as plt from scipy import stats from sklearn.model_selection import KFold from sklearn.preprocessing import MinMaxScaler from sklearn.svm import SVR from sklearn import * import datetime as dt #### Import Dependencies #### Start Python Imports import math, time, random, datetime #### Data Manipulation import numpy as np import pandas as pd #### Visualization import matplotlib.pyplot as plt import missingno import seaborn as sns plt.style.use("seaborn-whitegrid") #### Preprocessing from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize #### Machine learning import catboost from sklearn.model_selection import train_test_split from sklearn import model_selection, tree, preprocessing, metrics, linear_model from sklearn.svm import LinearSVC from sklearn.ensemble import GradientBoostingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import LinearRegression, LogisticRegression, SGDClassifier from sklearn.tree import DecisionTreeClassifier from catboost import CatBoostClassifier, Pool, cv ##### Let's be rebels and ignore warnings for now import warnings warnings.filterwarnings("ignore") train = pd.read_csv("../input/cat-in-the-dat/train.csv") test = pd.read_csv("../input/cat-in-the-dat/test.csv") print("Data is loaded!") def RMSLE(y, pred): return metrics.mean_squared_error(y, pred) ** 0.5 data = train.copy() valid = test.copy() # data.nunique() # valid.nunique() # in case needs # get a list of object cat columns # Get list of categorical variables s = data.dtypes == "object" object_cols = list(s[s].index) print("Categorical variables:") print(object_cols) len(object_cols) # We will seperate the object columns that should be one hot encoded (< 12 unique values) and columns that should be label encoded (rest of the object categorical columns) OH_col = data.loc[:, data.nunique() < 15].columns new_OH = [] for x in OH_col: if x in object_cols: new_OH.append(x) # new_OH LE_col = data.loc[:, data.nunique() >= 15].columns new_LE = [] for x in LE_col: if x in object_cols: new_LE.append(x) # new_LE # ### Lebel encoding : inplace # Make copy to avoid changing original data label_X_train = data.copy() label_X_valid = valid.copy() # Apply label encoder to each column with categorical data label_encoder = LabelEncoder() for col in new_LE: label_X_train[col] = label_encoder.fit_transform(data[col]) label_X_valid[col] = label_encoder.fit_transform(valid[col]) print(label_X_train.shape) print(label_X_valid.shape) label_X_train.head(2) label_X_valid.head(2) # use label_X_train and label_X_valid for next calculations ( One hot encoding ) # ### * One Hot encoding # label_X_train[new_OH].nunique() # Apply one-hot encoder to each column with categorical data OH_encoder = OneHotEncoder(handle_unknown="ignore", sparse=False) OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(label_X_train[new_OH])) OH_cols_valid = 
pd.DataFrame(OH_encoder.fit_transform(label_X_valid[new_OH])) ## check if fit_transform or just transform should be used.... for valid data set.... print(OH_cols_train.shape) print(OH_cols_valid.shape) label_X_train[new_OH].nunique().sum() # means OH_cols_train has no data of rest of columns.... # so now add the data back # One-hot encoding removed index; put it back OH_cols_train.index = label_X_train.index OH_cols_valid.index = label_X_valid.index # Remove categorical columns (will replace with one-hot encoding) # these are columns which has numerical data and lebel encoding columns that's been processed already. num_X_train = label_X_train.drop(new_OH, axis=1) num_X_valid = label_X_valid.drop(new_OH, axis=1) # num_X_train.head(2) # num_X_valid.head(2) # Add one-hot encoded columns to numerical features OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1) OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1) # OH_X_train.head(2) # OH_X_valid.head(2) print(OH_X_train.shape) print(OH_X_valid.shape) # > ### * ML Algo from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score from sklearn.linear_model import LogisticRegression from sklearn.utils.testing import ignore_warnings rf = RandomForestClassifier(n_estimators=200, n_jobs=-1, verbose=2) # model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) lr1 = LogisticRegression(solver="lbfgs", C=0.1) X_train = OH_X_train.drop("target", axis=1) y_train = OH_X_train["target"] X_train = X_train.drop("id", axis=1) X_test = OH_X_valid.drop("id", axis=1) # scaler = MinMaxScaler(feature_range=(0, 1)) # X_train = scaler.fit_transform(X_train) # X_test = scaler.fit_transform(X_test) print(X_train.shape) print(y_train.shape) # rf.fit(X_train, y_train) # lr1.fit(X_train, y_train) # alternate cv method X, X_hideout, y, y_hideout = model_selection.train_test_split( X_train, y_train, test_size=0.13, random_state=42 ) # Set up folds K = 4 kf = model_selection.KFold(n_splits=K, random_state=1, shuffle=True) np.random.seed(1) # model = SVR(kernel='rbf') params = { "n_estimators": 10, # change to 9000 to obtain 0.505 on LB (longer run time expected) "max_depth": 5, "min_samples_split": 200, "min_samples_leaf": 50, "learning_rate": 0.005, "max_features": "sqrt", "subsample": 0.8, "loss": "ls", } # model = ensemble.GradientBoostingRegressor(**params) model = ensemble.RandomForestClassifier(n_jobs=-1, verbose=2) print("Started CV at ", dt.datetime.now()) for i, (train_index, test_index) in enumerate(kf.split(X)): # Create data for this fold y_train, y_valid = y.iloc[train_index].copy(), y.iloc[test_index] X_train, X_valid = X.iloc[train_index, :].copy(), X.iloc[test_index, :].copy() # X_test = test[col] print("\nFold ", i) fit_model = model.fit(X_train, y_train) pred = model.predict(X_valid) print("RMSLE GBM Regressor, validation set, fold ", i, ": ", RMSLE(y_valid, pred)) pred_hideout = model.predict(X_hideout) print( "RMSLE GBM Regressor, hideout set, fold ", i, ": ", RMSLE(y_hideout, pred_hideout), ) print( "Prediction length on validation set, GBM Regressor, fold ", i, ": ", len(pred) ) # Accumulate test set predictions del X_train, X_valid, y_train print("Finished CV at ", dt.datetime.now()) # scores = [] # best_svr = SVR(kernel='rbf') # #random_state=42, shuffle=False # cv = KFold(n_splits=10) # for train_index, test_index in cv.split(X_train): # print("Train Index: ", train_index, "\n") # print("Test Index: ", test_index) # X_tr = X_train.iloc[train_index,:] # X_tes = 
X_train.iloc[test_index,:]
#     y_tr = y_train.iloc[train_index]
#     y_tes = y_train.iloc[test_index]
#     print(X_tr.shape)
#     print(X_tes.shape)
#     print(y_tr.shape)
#     print(y_tes.shape)
#     #best_svr.fit(X_tr, y_tr)
#     #scores.append(best_svr.score(X_tes, y_tes))
# X_train.iloc[[1,3],:]
# y_train.iloc[30000]
X_test.head(2)
# predictions = rf.predict(X_test)
# predict_lr = lr1.predict_proba(X_test)
# prediction_svr = best_svr.predict(X_test)
# submission = pd.DataFrame()
# submission_LR = pd.DataFrame()
# submission_svr = pd.DataFrame()
# submission["id"] = OH_X_valid["id"]
# submission_LR["id"] = OH_X_valid["id"]
# submission_svr['id'] = OH_X_valid["id"]
# submission["target"] = predictions
# submission_LR["target"] = predict_lr[:, 1]
# submission_svr["target"] = prediction_svr
prediction = model.predict(X_test)
submission = pd.DataFrame()
submission["id"] = OH_X_valid["id"]
submission["target"] = prediction
submission.to_csv("cat_submission1.csv", index=False)
# predict_lr and submission_LR only exist in the commented-out logistic regression block
# above, so the lines that used them are commented out here as well to keep the notebook
# runnable end to end.
# predict_lr[:, 1]
submission.target.value_counts().sum()
submission.to_csv("cat_submission1.csv", index=False)
# submission_LR.to_csv("cat_submission_lr.csv", index=False)
from sklearn.model_selection import cross_validate

# Note: X_train and y_train are deleted inside the CV loop above, so this quick
# logistic-regression AUC check is run on the pre-hideout split X / y instead.
score = cross_validate(lr1, X, y, cv=3, scoring="roc_auc")["test_score"].mean()
print(f"{score:.6f}")
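# The fold loop above scores the classifier with an RMSE-style function; for this binary
# target the quantity that is ultimately of interest (and what cross_validate is asked
# for just above) is ROC AUC computed from predicted probabilities. Minimal standalone
# sketch of that scoring pattern on synthetic data, not the competition data:
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

X_demo, y_demo = make_classification(n_samples=400, n_features=8, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.25, random_state=0)
clf_demo = RandomForestClassifier(n_estimators=50, random_state=0).fit(X_tr, y_tr)
print(roc_auc_score(y_te, clf_demo.predict_proba(X_te)[:, 1]))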
# All data are taken at 250 Hz # Importing Libraries Requird for the Preprocessing import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import scipy.io from math import pi from scipy.fftpack import fft import scipy.signal as sig import os import math # It will load the data into matrix def load_data(): f = [] for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: f.append(os.path.join(dirname, filename)) return f # Calling the load function f = load_data() # showing the data for k in f: print(k) # this contain two datatset # Datatset 1 contain sos, stop, and medicine # Dataset 3 contain come here and washroom # In extract raw data function we seprated them into Mdata_1 and Mdata_3 def extract_raw_data(): Mdata_1 = [] Mdata_3 = [] for i in range(len(f)): mat = scipy.io.loadmat(f[i]) col = mat["labels"] data = mat["data"] col = [j for j in col] # pdata is a dataframe pdata = pd.DataFrame(data, columns=col) if i < 13: Mdata_3.append(pdata) else: Mdata_1.append(pdata) return Mdata_1, Mdata_3 # Calling function to extract dataframe into Mdata_1 and Mdata_3 # Mdata_1 contains:: # + SOS - SOS - SOS + STOP - STOP - STOP - + MEDICINE - MEDICINE - MEDICINE # +: 2sec, words:1sec, blank: 2sec # Mdata_3 contains:: # + COME_HERE - COME_HERE - COME_HERE + WASHROOM - WASHROOM - WASHROOM # +: 2sec, words:1sec, blanks:2sec Mdata_1, Mdata_3 = extract_raw_data() # showing shape of dataframe of each subject(13 subjects) # for Mdata_1 : for i in Mdata_1: print(i.shape) # ////////////////////rough plt.rcParams["figure.figsize"] = (20, 20) Fs = 250 # f=20 # x=np.sin(2*pi*f*t)#+0.5*np.sin(2*pi*40*t)+0.5*np.sin(2*pi*80*t) # Generate Noise # y=0.25*np.sin(2*pi*50*t) # y=np.array([50]*len(t)) # x=x+y;#Noisy Signal x = Mdata_1[0]["ExG1"] / (max(abs(Mdata_1[0]["ExG1"]))) # t=np.arange(0,,1/Fs) plt.figure(1) plt.subplot(2, 1, 1) plt.plot(x) plt.title("sinusodial wave") plt.xlabel("Time(s)") plt.ylabel("Amplitude") # spectral Analysis # Compute FFT X = fft(x) # Generate Frequency axis # n=np.size(t) # fr=(Fs/2)*np.linspace(0,9,(n//2)) x_m = abs(X) / (max(abs(X))) # print((fr)) plt.subplot(2, 1, 2) plt.plot((pow(10, x_m))) plt.title("Magnitude spectrum") plt.xlabel("Frequency") plt.ylabel("Magnitude") plt.tight_layout() # make dataframe of equal size of Mdata_1 for i in range(len(Mdata_1)): Mdata_1[i] = Mdata_1[i][0:7250] print(Mdata_1[i].shape) # showing shape of dataframe of each subject(13 subjects) # for Mdata_3 : for i in Mdata_3: print(i.shape) # making dataframe of equal size Mdata_3 for i in range(len(Mdata_3)): Mdata_3[i] = Mdata_3[i][0:4500] print(Mdata_3[i].shape) # Collecting the useful data where subject were thinking the words # for Mdata_1 def seg_data1(): f = 250 u1 = [] for n in range(len(Mdata_1)): a = 2 * f g = pd.DataFrame() for i in range(9): if i == 5: h = Mdata_1[n][a : a + f] a += 2 * f else: h = Mdata_1[n][a : a + f] g = pd.concat([g, h]) a += 3 * f u1.append(g) return u1 # Collecting the useful data where subject were thinking the words # for Mdata_3 def seg_data3(): f = 250 u3 = [] for n in range(len(Mdata_1)): a = 2 * f g = pd.DataFrame() for i in range(6): h = Mdata_1[n][a : a + f] g = pd.concat([g, h]) a += 3 * f u3.append(g) return u3 data1 = seg_data1() data3 = seg_data3() data1[0]["ExG5"].shape plt.rcParams["figure.figsize"] = (20, 150) a, b = plt.subplots(30) loopi = 0 for y in list(data1[0].columns.values): b[loopi].plot([i for i in range(len(data1[1][y]))], data1[1][y]) loopi 
+= 1 plt.rcParams["figure.figsize"] = (20, 10) t = [i for i in range(len(data1[0]["ExG5"]))] plt.plot(t, data1[0]["ExG5"]) plt.rcParams["figure.figsize"] = (20, 20) Fs = 250 t = np.arange(0, 9, 1 / Fs) # f=20 # x=np.sin(2*pi*f*t)#+0.5*np.sin(2*pi*40*t)+0.5*np.sin(2*pi*80*t) # Generate Noise # y=0.25*np.sin(2*pi*50*t) # y=np.array([50]*len(t)) # x=x+y;#Noisy Signal x = data1[0]["ExG1"] / (max(abs(data1[0]["ExG1"]))) plt.figure(1) plt.subplot(2, 1, 1) plt.plot(t, x) plt.title("sinusodial wave") plt.xlabel("Time(s)") plt.ylabel("Amplitude") # spectral Analysis # Compute FFT X = fft(x) # Generate Frequency axis n = np.size(t) fr = (Fs / 2) * np.linspace(0, 9, (n // 2)) x_m = abs(X[0 : n // 2]) # print((fr)) plt.subplot(2, 1, 2) plt.plot(fr, ((x_m))) plt.title("Magnitude spectrum") plt.xlabel("Frequency") plt.ylabel("Magnitude") plt.tight_layout() import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt from math import pi from scipy.fftpack import fft import scipy.signal as sig y = data1[0]["ExG1"] / (max(abs(data1[0]["ExG1"]))) x = [i for i in range(len(y))] rolling_mean = y.rolling(window=5).mean() rolling_mean2 = y.rolling(window=10).mean() plt.plot(x, y, label="AMD") plt.plot(x, rolling_mean, label="5 window size maf", color="orange") plt.plot(x, rolling_mean2, label="10 window size maf", color="magenta") plt.legend(loc="upper left") plt.show() for i in x_m: print(i) plt.plot([i for i in range(len(x_m))], x_m)
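# The spectra above build their frequency axis by hand with linspace; numpy's rfft and
# rfftfreq do the same bookkeeping and make the axis harder to get wrong. Standalone
# sketch at the same 250 Hz sampling rate with a known 10 Hz + 40 Hz test signal:
import numpy as np

fs = 250
t = np.arange(0, 1, 1 / fs)
sig_demo = np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * 40 * t)
spectrum = np.abs(np.fft.rfft(sig_demo)) / len(sig_demo)
freqs = np.fft.rfftfreq(len(sig_demo), d=1 / fs)
print(freqs[np.argmax(spectrum[1:]) + 1])  # dominant component, ignoring DC -> 10.0 Hz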
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import matplotlib.pyplot as plt import seaborn as sns import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.utils import to_categorical from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.models import Sequential from tensorflow.keras.layers import ( Dense, Conv2D, Flatten, MaxPool2D, Dropout, BatchNormalization, AveragePooling2D, BatchNormalization, ) from sklearn.utils import shuffle from tensorflow.keras.optimizers import Adam import random train = pd.read_csv("/kaggle/input/Kannada-MNIST/train.csv") test = pd.read_csv("/kaggle/input/Kannada-MNIST/test.csv") validation = pd.read_csv("/kaggle/input/Kannada-MNIST/Dig-MNIST.csv") def prepare_data(features, target=None): """ here is a helper function to resize training data to (28,28,1) and to categorize the target variable """ # shuffle data first features, target = shuffle(features, target) x = features.values.reshape((len(features), 28, 28, 1)) y = to_categorical(target) print(y.shape) # normalize x = x / 255.0 return (x, y) X, y = train.loc[:, train.columns != "label"], train.label X_val, y_val = validation.loc[:, validation.columns != "label"], validation.label print( "We have {} training examples and {} validation examples.".format( X.shape[0], X_val.shape[0] ) ) print( "X_train: {} | y_train: {} |\nX_val: {} | y_val: {}".format( X.shape, y.shape, X_val.shape, y_val.shape ) ) x_train, y_train = prepare_data(X, y) x_val, y_val = prepare_data(X_val, y_val) print( "X_train: {} | y_train: {} |\nX_val: {} | y_val: {}".format( x_train.shape, y_train.shape, x_val.shape, y_val.shape ) ) # # data augmentation # Initialising the ImageDataGenerator class. # We will pass in the augmentation parameters in the constructor. datagen = ImageDataGenerator(zoom_range=0.15, rotation_range=2) model = Sequential() # LeNet # Input Image Dimensionns : 28x28x1 # 1. Conv2D - kernel : 11x11x96. strides=4,4;padding-valid model.add( Conv2D( filters=6, kernel_size=(5, 5), strides=(1, 1), padding="valid", activation="relu", input_shape=(28, 28, 1), ) ) model.add(Dropout(0.2)) model.add(BatchNormalization()) # 2. Average Pool - kernel - 2x2, strides = 2,2 model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="valid")) # 3. Conv2d - kernel - 5x5x16, strides = 1,1 model.add( Conv2D( filters=16, kernel_size=(5, 5), strides=(1, 1), padding="valid", activation="relu", ) ) model.add(Dropout(0.2)) model.add(BatchNormalization()) # 4. Average Pool - kernel - 2x2, strides = 2,2 model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="valid")) # # 5. Convolution - kernel - 5x5x120 ,strides = 1,1 # model.add(Conv2D(filters=120,kernel_size=(4,4),strides=(1,1),padding='valid',activation='relu')) # model.add(BatchNormalization()) model.add(Flatten()) # 6. Fully connected layer - 84 nodes model.add(Dense(84, activation="relu")) # 7. 
Output layer - 10 nodes model.add(Dense(10, activation="softmax")) # Compile optimizer = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999) model.compile( optimizer=optimizer, loss=["categorical_crossentropy"], metrics=["accuracy"] ) # # Train # model.fit(x_train,y_train,epochs=10,validation_data=(x_val,y_val)) model.fit(x_train, y_train, epochs=10, validation_data=(x_val, y_val)) test = pd.read_csv("/kaggle/input/Kannada-MNIST/test.csv") test_features = test.iloc[0:, test.columns != "id"] test_features = test_features.values.reshape((len(test_features), 28, 28, 1)) test_features = test_features.astype("float") test_features = test_features / 255.0 predictions = model.predict(test_features) predictions = np.argmax(predictions, axis=1) predictions.shape final_df = pd.DataFrame({"id": test["id"], "label": predictions}) final_df.head() final_df.to_csv("submission.csv", index=False)
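# A quick sanity check on the submission above, reusing the `test_features` and
# `predictions` already in memory (a hedged sketch, not part of the original flow):
# plot the predicted label distribution and a few sample digits with their predictions.
final_df["label"].value_counts().sort_index().plot(kind="bar")
plt.xlabel("Predicted label")
plt.ylabel("Count")
plt.show()

fig, axes = plt.subplots(1, 5, figsize=(12, 3))
for ax, idx in zip(axes, random.sample(range(len(test_features)), 5)):
    ax.imshow(test_features[idx].reshape(28, 28), cmap="gray")
    ax.set_title(f"pred: {predictions[idx]}")
    ax.axis("off")
plt.show()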
# # CFG Python Data Challenge - Analyse Sales Data import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # pd.options.display.float_format = '{:,}'.format # pd.set_option('precision', 2) pd.options.display.float_format = "{:,.2f}".format fpath = "/kaggle/input/sales-data/sales_dataset (3).csv" df = pd.read_csv(fpath) display(df.head()) display(df.describe()) print(df.shape) print(df.info()) # Calculate total sales for each product df["Total Sales"] = df["Sale Price"] * df["Quantity Sold"] df_sorted = df.sort_values("Total Sales", ascending=False) display(df_sorted) total_sales = sum(df["Total Sales"]) print("Total sales across all products for the year: £{:,}.".format(total_sales)) category_price_mean = df.groupby("Category").mean()[["Sale Price"]] category_price_mean.columns = ["Sale Price Mean"] category_price_median = df.groupby("Category").median()[["Sale Price"]] category_price_median.columns = ["Sale Price Median"] category_price_std = df.groupby("Category").std()[["Sale Price"]] category_price_std.columns = ["Sale Price STD"] category_sale_count = df.groupby("Category").sum()[["Quantity Sold"]] category_sale_count.columns = ["Quantity Sold"] category_df = pd.concat( [ category_price_mean, category_price_median, category_price_std, category_sale_count, ], axis=1, ) display(category_df.sort_values("Sale Price Mean", ascending=False)) # month with the highest and lowest sales sales_by_month = ( df.groupby("Month") .sum()[["Total Sales"]] .sort_values("Total Sales", ascending=False) ) display(sales_by_month) # February was the month with the highest sales, October had the lowest sales. # Would be nice to have an area plot with total sales, stacking each category. # Sort month Jan - Dec # customers with highest purchases purchases_by_customer = df.groupby("Customer Name").sum()[ ["Total Sales", "Quantity Sold"] ] display(purchases_by_customer.sort_values("Total Sales", ascending=False)) # display(purchases_by_customer.sort_values('Quantity Sold',ascending = False)) # Write results of analysis to csv file # Visualise results
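# The notes above ask for a Jan-Dec ordering of months and for writing the
# analysis results to CSV files. A hedged sketch, assuming the 'Month' column
# holds full month names (adjust the list if the data uses abbreviations):
month_order = [
    "January", "February", "March", "April", "May", "June",
    "July", "August", "September", "October", "November", "December",
]
df["Month"] = pd.Categorical(df["Month"], categories=month_order, ordered=True)
sales_by_month_ordered = df.groupby("Month")["Total Sales"].sum()
sales_by_month_ordered.plot(kind="bar", figsize=(10, 5), title="Total Sales by Month")

# Persist the summary tables produced above
category_df.to_csv("category_summary.csv")
sales_by_month_ordered.to_csv("sales_by_month.csv")
purchases_by_customer.sort_values("Total Sales", ascending=False).to_csv(
    "purchases_by_customer.csv"
)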
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# Any results you write to the current directory are saved as output.
# Import all required libraries
import pandas as pd
import numpy as np
import os

# to save model
import pickle

# Import visualization modules
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

data = pd.read_csv(
    "/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv"
)
data.head(5)
data.describe()

# checking if there is any NULL data
missing_values = data.isnull()
missing_values.head(5)

# checking via heat map
sns.heatmap(data=missing_values, yticklabels=False, cbar=False, cmap="viridis")
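# The imports above (train_test_split, LogisticRegression, classification_report,
# pickle) are not used yet. A minimal baseline sketch, assuming the usual Telco
# schema: a 'customerID' column, a 'Churn' column holding 'Yes'/'No', and a
# 'TotalCharges' column stored as text.
data["TotalCharges"] = pd.to_numeric(data["TotalCharges"], errors="coerce").fillna(0)
churn_target = (data["Churn"] == "Yes").astype(int)
features = pd.get_dummies(data.drop(["customerID", "Churn"], axis=1), drop_first=True)

X_train, X_test, y_train, y_test = train_test_split(
    features, churn_target, test_size=0.2, random_state=42
)
log_reg = LogisticRegression(max_iter=1000)
log_reg.fit(X_train, y_train)
print(classification_report(y_test, log_reg.predict(X_test)))

# Save the fitted model with the imported pickle module
with open("churn_log_reg.pkl", "wb") as f:
    pickle.dump(log_reg, f)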
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import matplotlib.pyplot as plt import seaborn as sns sns.set() from scipy import stats DATA_DIR = "/kaggle/input/house-prices-advanced-regression-techniques/" train_df = pd.read_csv(DATA_DIR + "train.csv") test_df = pd.read_csv(DATA_DIR + "test.csv") train_df test_df sns.distplot(train_df["SalePrice"]) fig = plt.figure() res = stats.probplot(train_df["SalePrice"], plot=plt) train_df["SalePrice"].describe() lowerbound, upperbound = np.percentile(train_df["SalePrice"], [0.5, 99.5]) print(lowerbound, upperbound) train_df = train_df.drop( train_df[ (train_df["SalePrice"] < lowerbound) | (train_df["SalePrice"] > upperbound) ].index ) train_df["SalePrice"].describe() # SCALE TARGET VARIABLE train_df["SalePrice"] = np.log1p(train_df["SalePrice"]) corr_matrix = train_df.corr() corr_matrix.sort_values(by="SalePrice", inplace=True, axis=1, ascending=False) plt.figure(figsize=(25, 25)) sns.heatmap(corr_matrix, square=True, annot=True, fmt="0.2f") sns.scatterplot(x=train_df["OverallQual"], y=train_df["SalePrice"]) train_df = train_df.drop(train_df[train_df["OverallQual"] <= 2].index) train_df = train_df.drop( train_df[(train_df["OverallQual"] == 10) & (train_df["SalePrice"] < 12.5)].index ) train_df = train_df.drop( train_df[(train_df["OverallQual"] == 4) & (train_df["SalePrice"] > 12.3)].index ) train_df = train_df.drop( train_df[(train_df["OverallQual"] == 7) & (train_df["SalePrice"] < 11.5)].index ) sns.scatterplot(x=train_df["OverallQual"], y=train_df["SalePrice"]) sns.scatterplot(x=train_df["GrLivArea"], y=train_df["SalePrice"]) train_df = train_df.drop( train_df[(train_df["GrLivArea"] > 3300) & (train_df["SalePrice"] < 12.5)].index ) sns.scatterplot(x=train_df["GarageCars"], y=train_df["SalePrice"]) sns.scatterplot(x=train_df["GarageArea"], y=train_df["SalePrice"]) train_df = train_df.drop(train_df[train_df["GarageArea"] > 1230].index) sns.scatterplot(x=train_df["GarageYrBlt"], y=train_df["SalePrice"], alpha=0.6) train_df["GarageTotal"] = train_df["GarageArea"] * train_df["GarageCars"] test_df["GarageTotal"] = test_df["GarageArea"] * test_df["GarageCars"] sns.scatterplot(x=train_df["GarageTotal"], y=train_df["SalePrice"]) train_df = train_df.drop(train_df[train_df["GarageTotal"] > 3750].index) train_df = train_df.drop( train_df[(train_df["SalePrice"] < 11.7) & (train_df["GarageTotal"] > 2000)].index ) train_df.drop(["GarageArea", "GarageCars"], axis=1, inplace=True) test_df.drop(["GarageArea", "GarageCars"], axis=1, inplace=True) sns.scatterplot(x=train_df["TotalBsmtSF"], y=train_df["SalePrice"], alpha=0.6) train_df = train_df.drop(train_df[train_df["TotalBsmtSF"] > 3000].index) train_df = train_df.drop( train_df[(train_df["SalePrice"] < 11.1) & (train_df["TotalBsmtSF"] > 1000)].index ) sns.scatterplot(x=train_df["YearBuilt"], y=train_df["SalePrice"], alpha=0.6) sns.scatterplot(x=train_df["FullBath"], y=train_df["SalePrice"], alpha=0.6) train_df = train_df.drop( train_df[(train_df["SalePrice"] < 11.2) & (train_df["FullBath"] == 2)].index ) train_df = train_df.drop( train_df[(train_df["SalePrice"] > 12.8) & 
(train_df["FullBath"] <= 1)].index ) sns.scatterplot(x=train_df["YearRemodAdd"], y=train_df["SalePrice"], alpha=0.6) train_df["RemodToSold"] = train_df["YrSold"] - train_df["YearRemodAdd"] test_df["RemodToSold"] = test_df["YrSold"] - test_df["YearRemodAdd"] sns.scatterplot(x=train_df["RemodToSold"], y=train_df["SalePrice"], alpha=0.6) train_df.drop(["YrSold", "YearRemodAdd"], axis=1, inplace=True) test_df.drop(["YrSold", "YearRemodAdd"], axis=1, inplace=True) sns.scatterplot(x=train_df["Fireplaces"], y=train_df["SalePrice"], alpha=0.6) sns.scatterplot(x=train_df["MasVnrArea"], y=train_df["SalePrice"], alpha=0.6) train_df = train_df.drop( train_df[(train_df["SalePrice"] < 12.5) & (train_df["MasVnrArea"] > 1000)].index ) train_df = train_df.drop( train_df[(train_df["SalePrice"] < 11.5) & (train_df["MasVnrArea"] > 500)].index ) # # Categorical Features cat_features = [f for f in train_df.columns if train_df[f].dtype == "object"] cat_features def analyzeCategoricalFeature(x, y): f, axes = plt.subplots(1, 3, figsize=(20, 5)) f.suptitle(x) axes[0].set_title("box plot") axes[0].tick_params(axis="x", labelrotation=45) sns.boxplot(x=train_df[x], y=train_df[y], ax=axes[0]) axes[1].set_title("stirp plot") axes[1].tick_params(axis="x", labelrotation=45) sns.stripplot( x=train_df[x], y=train_df[y], jitter=0.4, alpha=0.5, marker="D", size=5, ax=axes[1], ) axes[2].set_title("frequency plot") axes[2].tick_params(axis="x", labelrotation=45) sns.countplot(x=train_df[x], ax=axes[2]) cat_to_drop = [ "Street", "Alley", "LandContour", "Utilities", "LotConfig", "LandSlope", "Condition1", "Condition2", "BldgType", "RoofStyle", "RoofMatl", "Exterior2nd", "BsmtFinType2", "Heating", "Functional", "GarageQual", "GarageCond", "PoolQC", "Fence", "MiscFeature", ] # for f in cat_features: # analyzeCategoricalFeature(f, 'SalePrice') train_df.drop(cat_to_drop, axis=1, inplace=True) test_df.drop(cat_to_drop, axis=1, inplace=True) # missing data cleaning not_missing_cols = [f for f in train_df.columns if train_df[f].isna().sum() == 0] not_missing_cols missing_cols = [f for f in train_df.columns if train_df[f].isna().sum() > 0] train_df[missing_cols].isna().sum().sort_values() test_missing_cols = [f for f in test_df.columns if test_df[f].isna().sum() > 0] test_df[test_missing_cols].isna().sum().sort_values() # missing value imputing combined = train_df.drop("SalePrice", axis=1).append(test_df) missing = combined.isna().sum() > 0 missing_features = missing[missing == True].index print(missing_features) for feature in missing_features: if combined[feature].dtype == "object": combined[feature] = combined.groupby(["Neighborhood", "OverallQual"])[ feature ].transform( lambda x: x.fillna(x.value_counts().index[0]) if (len(x.value_counts().index) > 0) else None ) else: combined[feature] = combined.groupby(["Neighborhood", "OverallQual"])[ feature ].transform(lambda x: x.fillna(x.mean())) missing = combined.isna().sum() > 0 missing_features = missing[missing == True].index print(missing_features) for feature in missing_features: if combined[feature].dtype == "object": combined[feature] = combined.groupby(["Neighborhood"])[feature].transform( lambda x: x.fillna(x.value_counts().index[0]) if (len(x.value_counts().index) > 0) else None ) else: combined[feature] = combined.groupby(["Neighborhood"])[feature].transform( lambda x: x.fillna(x.mean()) ) combined.isna().sum().any() features_to_encode = [f for f in train_df.columns if train_df[f].dtype == "object"] features_to_encode # Categorical Feature Encoding def getObjectColumnsList(df): 
return [cname for cname in df.columns if df[cname].dtype == "object"] def PerformOneHotEncoding(df, columnsToEncode): return pd.get_dummies(df, columns=columnsToEncode) cat_cols = getObjectColumnsList(combined) combined = PerformOneHotEncoding(combined, features_to_encode) combined # split again train_df_final = combined.iloc[0 : train_df.shape[0]].copy() # df_train.loc[:, "SalePrice"] = np.log(train.SalePrice) test_df_final = combined.iloc[train_df.shape[0] : :].copy() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( train_df_final, train_df["SalePrice"], test_size=0.2, random_state=0 ) print(X_train.count()) print(y_train.count()) # BUILDING MODEL import xgboost as xgb # from sklearn.linear_model import Lasso # models model_xgb = xgb.XGBRegressor(n_estimators=800, learning_rate=0.25) # predictors = ['OverallQual', 'GrLivArea', 'GarageTotal', 'GarageYrBlt', 'TotalBsmtSF', 'FullBath', 'RemodToSold', 'Fireplaces', 'MasVnrArea'] model_xgb.fit(pd.DataFrame(train_df_final), train_df["SalePrice"]) prediction = np.expm1(model_xgb.predict(pd.DataFrame(test_df_final))) prediction from sklearn.model_selection import GridSearchCV parameters = [ { "n_estimators": [200, 400, 600, 800], "max_depth": [3, 4, 5, 6], "learning_rate": [0.001, 0.01, 0.1, 1], "booster": ["gbtree", "gblinear", "dart"], "gamma": [0.001, 0.01, 0.1, 1, 10], "reg_alpha": [0.001, 0.01, 0.1, 1, 10], "reg_lambda": [0.001, 0.01, 0.1, 1, 10], } ] # parameters = [ # { # 'n_estimators': [200], # 'max_depth': [1, 3], # 'learning_rate': [0.001, 0.01, 0.1], # 'booster': ['gbtree', 'gblinear', 'dart'], # 'gamma': [0.001, 0.01], # 'reg_alpha': [0.001, 0.01], # 'reg_lambda': [0.001] # } # ] grid_search = GridSearchCV( estimator=model_xgb, param_grid=parameters, scoring="neg_mean_squared_error", cv=10, n_jobs=-1, ) grid_search = grid_search.fit(pd.DataFrame(train_df_final), train_df["SalePrice"]) best_accuracy = grid_search.best_score_ best_parameters = grid_search.best_params_ print("best accuracy", best_accuracy) print("best parameters", best_parameters) submission = pd.DataFrame({"Id": test_df["Id"], "SalePrice": prediction}) submission.to_csv("submission.csv", index=False)
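# The submission above was generated from the un-tuned XGBRegressor even though
# the grid search has already found better parameters. A hedged sketch of
# refitting with those parameters and rebuilding the submission file:
best_xgb = xgb.XGBRegressor(**best_parameters)
best_xgb.fit(train_df_final, train_df["SalePrice"])
tuned_prediction = np.expm1(best_xgb.predict(test_df_final))

submission = pd.DataFrame({"Id": test_df["Id"], "SalePrice": tuned_prediction})
submission.to_csv("submission.csv", index=False)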
# <div style="padding:20px; # color:white; # margin:10; # font-size:200%; # text-align:center; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">[S03E12] 🚀 Stacking Tuned Models ⚙️🔧 # # Table of Contents # - [1. Loading and Inspecting Data](#loading-data) # - [2. Adversarial Validation](#adv-validation) # - [3. Exploratory Data Analysis](#eda) # - [4. Feature Engineering](#feat-eng) # - [5. Modelling](#modelling) # - [6. Tuning Models with Optuna](#tuning) # - [6.1. CatBoostClassifier](#tuning-catboost) # - [6.2. RandomForestClassifier](#tuning-rf) # - [6.3. Logistic Regression](#tuning-logreg) # - [7. Ensembling Models](#Ensembling) # - [8. Making Final Predictions - Tuned CatBoost Model](#predictions1) # - [9. Making Final Predictions - Ensemble Model](#predictions2) # - [10. Final Scores on Public Leaderboard](#final) # Importing libraries # Data Handling import pandas as pd import numpy as np # Data Visualization import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px from plotly.offline import init_notebook_mode init_notebook_mode(connected=True) # Machine Learning Classification Models from xgboost import XGBClassifier from sklearn.linear_model import LogisticRegression from lightgbm import LGBMClassifier from catboost import CatBoostClassifier from sklearn.ensemble import ( AdaBoostClassifier, RandomForestClassifier, StackingClassifier, ) from sklearn.ensemble import StackingClassifier # Preprocessing imports from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import QuantileTransformer from sklearn.impute import SimpleImputer from sklearn.compose import ColumnTransformer from sklearn.base import BaseEstimator, TransformerMixin from sklearn.pipeline import Pipeline from sklearn.model_selection import ( StratifiedKFold, cross_val_score, KFold, train_test_split, ) # Metrics imports from sklearn.metrics import roc_auc_score, roc_curve, auc # Tuning import optuna # Ignore warnings import warnings warnings.filterwarnings("ignore") # # # <div style="padding:20px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">1. Loading and Inspecting Data # > 📝 | This competition's data was synthetically generated from the Kidney Stone Prediction based on Urine Analysis dataset. # The goal of this competition is to build a model that is going to output the probabilities of a person to have kidney stones based on the components of this person's urine. The model will be evaluated based on the AUC score. # Load Competition Data train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv") test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv") # Loading original Data original_df = pd.read_csv( "/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv" ) # Defining inspection function def inspect_data(df): print(f"\n the dataset has {df.shape[0]} rows and {df.shape[1]} attributes ") print(f"\n Null Values:\n{df.isnull().sum()}") print(f"\n Duplicates: {df.duplicated().sum()}") print(f"\n Data Types:\n{df.dtypes}") inspect_data(train) inspect_data(test) inspect_data(original_df) # # # <div style="padding:20px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">2. 
Adversarial Validation # > 📝 | The Adversarial Validation is an important step to decide whether it is adequate to merge the train dataframe and the original dataframe based on how different they are from each other. # We're going to use a classifier to see how well it can distinguish the data sample originated from the original dataset and the train dataset. If our classifier have an AUC score below 0.5, we can infer that there is not much difference between both datasets, and it's safe to merge them. # Labeling data samples from the training set as 'is_train' = 1 train["is_train"] = 1 train # Labeling data samples from the original set as 'is_train' = 0 original_df["is_train"] = 0 original_df # Creating a new dataset by concatenating the train test and the original set av_df = pd.concat([train, original_df], axis=0) av_df = av_df.sample(frac=1, random_state=42) # Shuffling data av_df av_df["is_train"].value_counts() # Counting is_train values # > 💡 | There's a strong class imbalance in our dataset. 414 data samples are from the train set, while only 79 comes from the original dataset. # We're going to use the StratifiedKFold to perform cross-validation for this adversarial validation, since it is more adequate when dealing with strong class imbalance. X = av_df.drop(["is_train", "id"], axis=1) # Independent variables y = av_df.is_train # Target variable # Initiating XGBClassifier to perform a binary logistic classification xgb_model = XGBClassifier(objective="binary:logistic", random_state=42) # Initiating StratifiedKFold cv = StratifiedKFold( n_splits=5, shuffle=True, random_state=42 # 5 Folds # Shuffling data samples ) # Performing Cross-Validation cross_val_scores = [] for i, (train_av, test_av) in enumerate(cv.split(X, y)): X_train, X_test = X.iloc[train_av], X.iloc[test_av] y_train, y_test = y.iloc[train_av], y.iloc[test_av] xgb_model.fit(X_train, y_train) y_pred = xgb_model.predict_proba(X_test)[:, 1] score = roc_auc_score(y_test, y_pred) cross_val_scores.append(score) print(f"Fold {i+1}, AUC Score = {score:.3f}") # Computing the mean AUC scores mean_auc_score = np.mean(cross_val_scores) # Plotting AUC-ROC curve fpr, tpr, _ = roc_curve(y_test, y_pred) plt.plot(fpr, tpr, label="ROC Curve (AUC = %0.4f)" % mean_auc_score) plt.plot([0, 1], [0, 1], linestyle="--", color="gray", label="Random Guess") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.legend() plt.show() # > 💡 | The classifier can't distinguish between data samples from the original and train sets. This is a good indicator that these datasets are extremely similar on their distribution and we can merge them to form a larger training set. # Creating new training set by concatenating the original_df and train set train_df = pd.concat([original_df, train], axis=0) train_df train_df.drop( ["id", "is_train"], axis=1, inplace=True ) # Removing 'id' and 'is_train' columns train_df # # # <div style="padding:20px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">3. Exploratory Data Analysis # > 📝 | Our training dataset has the following attributes: # .gravity: Gravity measures the density of the urine compared to water. A higher gravity in this case can be an indicator of substances in the urine that can lead to the increased risk of stone formation. # .ph: The ph measures the acidity of the urine. Overall, both a low ph and a high ph can lead to increased risk of stone formation. 
# .osmo: The osmolarity is proportional to the concentration of molecules in the urine. # .cond: The conductivity measures the ability of urine to conduct an electrical current. A higher level of conductivity may also be an indicator of increased risk for stone formation. # .urea: This is the measure of the concentration of urea, which is a waste product of protein metabolism, in urine. # .calc: A high concentration of calcium in the urine can be a contributor to the formation of kidney stones. # > 📝 | The first thing we're going to analyze is the distribution of the target variable. # Counting values in 'target' variable target_count = train["target"].value_counts() # Creating a new dataframe containing the values of each label values = pd.DataFrame({"target": target_count.index, "count": target_count.values}) # Plotting a pie plot fig = px.pie( values, values="count", names="target", template="ggplot2", title="Target Variable Distribution", ) fig.update_traces(hole=0.4) fig.show() # > 💡 | Most patients, about 55.6% of them, do not have kidney stones. # Even though we have more samples of people without kidney stones, the data imbalance isn't really much strong here. # Listing columns for data analysis cols = ["gravity", "ph", "osmo", "cond", "urea", "calc"] # Plotting histogram for each attribute for col in cols: plt.figure() sns.histplot(data=train_df, x=col, kde=True) plt.title(f"{col} distribution") plt.show() # > 💡 | The attributes not normally distributed, which indicates we may need to transform them to a gaussian-like distribution. # Plotting boxplots for col in cols: plt.figure() sns.boxplot(data=train_df, x=col) plt.title(f"{col} boxplot") plt.show() # > 💡 | There are outliers in gravity and ph, which we may have to deal with later on. # Plotting pairplots to observe relationship among features sns.pairplot(train_df[cols], kind="reg") plt.show() # Plotting correlation heatmap plt.figure(figsize=(12, 8)) corr = train_df.corr() mask = np.zeros_like(corr, dtype=np.bool) mask[np.triu_indices_from(mask)] = True sns.heatmap(corr, annot=True, mask=mask) plt.show() # > 💡 | Strongest correlations: # . osmo and urea (0.82) # . osmo and cond (0.73) # . urea and gravity (0.66) # Boxplots by target for col in cols: fig = px.box( train_df, y="urea", x="target", color="target", title=f"Boxplots of {col} by Target", template="ggplot2", height=600, ) fig.show() # > 💡 | Overall, patients with kidney stones present higher levels of concentration of gravity, osmo, calc, urea, and higher ph than those who are not suffering with kidney stones. We found a relevant pattern. # # # <div style="padding:20px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">4. Feature Engineering # > 📝 | To increase the predictive powers of our models, I am going to add the following features to our datasets: # . osmo-to-urea-ratio: Osmo divided by urea. # . osmo-to-cond-diff: Osmo subtracted by cond. # . calc-to-ph-ratio: Calc divided by ph. # . osmo-to-urea-diff: Osmo subtracted by urea. # . ph-category: A categorical feature that is going to classify any ph below 6 as “acidic”, ph between 6 and 8 as “natural”, and ph above 8 as “basic”. # # # <div style="padding:20px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">5. 
Modelling # Independent feature X = train_df.drop("target", axis=1) y = train_df.target # Target variable # Splitting data X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=0.7, random_state=42 ) # 70% of data for training # > 📝 | The FeatureEngineering class below performs a bunch of transformation to the data that are going to be added to a Pipeline. # First, we create the new features by performing operations on the input data. Then, we use RobustScaler, which is effective against outliers, to standardize the data. The next step is to use QuantileTransformer to normalize the data into a gaussian-like distribution. Lastly, this is going to return the X variables transformed, standardized, and with new features. # # Creating FeatureEngineering class class FeatureEngineering(BaseEstimator, TransformerMixin): def fit(self, X, y=None): return self def transform(self, X, y=None): # New Features X["osmo-to-urea-ratio"] = X["osmo"] / X["urea"] X["osmo-to-cond-dff"] = X["osmo"] - X["cond"] X["calc-to-ph-ratio"] = X["calc"] / X["ph"] X["osmo-to-urea-diff"] = X["osmo"] - X["urea"] X["ph_category"] = pd.cut( X["ph"], bins=[-np.inf, 6, 8, np.inf], labels=["acidic", "neutral", "basic"] ) dummies = pd.get_dummies(X["ph_category"]) X.drop("ph_category", axis=1, inplace=True) # Standardizing data scaler = RobustScaler() X_scaled = scaler.fit_transform(X.values) X = pd.DataFrame(X_scaled, index=X.index, columns=X.columns) # Normalizing Distribution transformer = QuantileTransformer( output_distribution="normal", n_quantiles=X.shape[0] ) X_normalized = transformer.fit_transform(X.values) X = pd.DataFrame(X_normalized, index=X.index, columns=X.columns) X = pd.concat([X, dummies], axis=1) return X # > 📝 | In the code below, we create a function to remove outliers according to the Intequartile Range (IQR). The reason why I didn't add this step into the pipeline, is that we only remove outliers from the training. We can't really perform outlier removal on the test set, as we wouldn't do that in a production environment. # # Creating outlier removal function using the IQR method def outlier_removal(col): Q1 = np.percentile(col, 25) Q3 = np.percentile(col, 75) IQR = Q3 - Q1 lower_bound = Q1 - 1.5 * IQR upper_bound = Q3 + 1.5 * IQR return col[(col > lower_bound) & (col < upper_bound)] # Creating list of columns with outliers outlier_cols = ["gravity", "ph"] # Applying 'outlier_removal' function in the columns above X_train[outlier_cols] = outlier_removal(X_train[outlier_cols]) X_test[outlier_cols] = outlier_removal(X_test[outlier_cols]) # Removing from the y sets the same samples (outliers) removed from the X sets outlier_mask_train = np.all(np.isin(X_train, X_train), axis=1) outlier_mask_test = np.all(np.isin(X_test, X_test), axis=1) X_train = X_train[outlier_mask_train] y_train = y_train[outlier_mask_train] X_test = X_test[outlier_mask_test] y_test = y_test[outlier_mask_test] # Printing shape of X_train and y_train to certify they have the same length print(X_train.shape) print(y_train.shape) # Creating Pipeline pipeline = Pipeline([("feature_engineering", FeatureEngineering())]) # Applying Pipeline to X_train and X_test X_train = pipeline.fit_transform(X_train) X_test = pipeline.transform(X_test) X_train # Visualizing results # > 📝 | Now I'm going to create a list called models that is going to receive some classifiers. # After that, we're going to iterate over each classifier in modelsfit them to the training data and perform probability predictions on the test data. 
We're then going to print the AUC score for each classifier. # # Creating a 'models' list models = [ LogisticRegression(random_state=42), XGBClassifier(random_state=42), LGBMClassifier(random_state=42), CatBoostClassifier(random_state=42, verbose=False), AdaBoostClassifier(random_state=42), RandomForestClassifier(random_state=42), ] # Iterating through models in the list for i in models: i.fit(X_train, y_train) # Fitting data y_pred = i.predict_proba(X_test)[:, 1] # Predicting probabilities auc_score = roc_auc_score(y_test, y_pred) # Evaluating print(f"{type(i).__name__}: AUC Score = {auc_score:.3f}") # Printing results # > 💡 | The top three best classifiers were: # 1. CatBoostClassifier (AUC = 0.829) # 2. RandomForestClassifier (AUC = 0.824) # 3. LogisticRegression (AUC = 0.809) # # # <div style="padding:20px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">6. Tuning Models with Optuna # > 📝 | In the next cells of code, we're going to perform a hyperparameter optimization in all our three best models to find the most optimal parameters for higher AUC scores. # # # <div style="padding:10px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">6.1. CatBoostClassifier # Defining trial function def tuning_catboost(trial): # Parameters optimization n_estimators = trial.suggest_int("n_estimators", 50, 1000, step=50) max_depth = trial.suggest_int("max_depth", 2, 10) learning_rate = trial.suggest_float("learning_rate", 0.01, 0.5, log=True) l2_leaf_reg = trial.suggest_float("l2_leaf_reg", 1, 10, log=True) # Initializing the CatBoost model with the parameters above catboost = CatBoostClassifier( n_estimators=n_estimators, max_depth=max_depth, learning_rate=learning_rate, l2_leaf_reg=l2_leaf_reg, random_state=42, verbose=False, ) catboost.fit(X_train, y_train) # Training y_pred = catboost.predict_proba(X_test)[:, 1] # Validating auc_score = roc_auc_score(y_test, y_pred) return auc_score # Returning score study_catboost = optuna.create_study( direction="maximize" ) # Creating study trying to maximize results study_catboost.optimize(tuning_catboost, n_trials=500) # Running optimization search catboost_best_params = study_catboost.best_params # Obtaining the best parameters catboost_best_score = study_catboost.best_value # Obtaining the best score # Printing best parameters and best score print(f"Best params: {catboost_best_params}") print(f"Best score: {catboost_best_score:.3f}") # Creating tuned CatBoost Model tuned_cb = CatBoostClassifier( **catboost_best_params, # Adding the best parameters to the model verbose=False, random_state=42, ) tuned_cb.fit(X_train, y_train) y_pred = tuned_cb.predict_proba(X_test)[:, 1] auc_score = roc_auc_score(y_test, y_pred) # Plotting the AUC-ROC curve and printing the AUC score fpr, tpr, _ = roc_curve(y_test, y_pred) plt.plot(fpr, tpr, label="ROC Curve (AUC = %0.3f)" % auc_score) plt.plot([0, 1], [0, 1], linestyle="--", color="grey", label="Random Guess") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve") plt.legend() plt.show() print(f"Tuned CatBoostClassifier AUC Score = {auc_score:.3f}") # # # <div style="padding:10px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">6.2. 
RandomForestClassifier def tuning_rf(trial): n_estimators = trial.suggest_int("n_estimators", 50, 1000, step=50) max_depth = trial.suggest_int("max_depth", 2, 50) min_samples_split = trial.suggest_int("min_samples_split", 2, 20) min_samples_leaf = trial.suggest_int("min_samples_leaf", 1, 20) rf = RandomForestClassifier( n_estimators=n_estimators, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, random_state=42, ) rf.fit(X_train, y_train) y_pred = rf.predict_proba(X_test)[:, 1] auc_score = roc_auc_score(y_test, y_pred) return auc_score study = optuna.create_study(direction="maximize") study.optimize(tuning_rf, n_trials=500) rf_best_params = study.best_params rf_best_score = study.best_value print(f"Best params: {rf_best_params}") print(f"Best score: {rf_best_score:.3f}") tuned_rf = RandomForestClassifier(**rf_best_params, random_state=42) tuned_rf.fit(X_train, y_train) y_pred = tuned_rf.predict_proba(X_test)[:, 1] auc_score = roc_auc_score(y_test, y_pred) fpr, tpr, _ = roc_curve(y_test, y_pred) plt.plot(fpr, tpr, label="ROC Curve (AUC = %0.3f)" % auc_score) plt.plot([0, 1], [0, 1], linestyle="--", color="grey", label="Random Guess") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve") plt.legend() plt.show() print(f"Tuned RandomForestClassifier AUC Score = {auc_score:.3f}") # # # <div style="padding:10px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">6.3. Logistic Regression def tuning_logistic(trial): C = trial.suggest_float("C", 1e-3, 1e3, log=True) penalty = trial.suggest_categorical("penalty", ["none", "l1", "l2"]) if penalty == "l1": solver = "saga" else: solver = "lbfgs" logistic = LogisticRegression(C=C, penalty=penalty, solver=solver, random_state=42) logistic.fit(X_train, y_train) y_pred = logistic.predict_proba(X_test)[:, 1] auc_score = roc_auc_score(y_test, y_pred) return auc_score study_logistic = optuna.create_study(direction="maximize") study_logistic.optimize(tuning_logistic, n_trials=500) logistic_best_params = study_logistic.best_params logistic_best_score = study_logistic.best_value print(f"Best params: {logistic_best_params}") print(f"Best score: {logistic_best_score:.3f}") tuned_logreg = LogisticRegression( **logistic_best_params, random_state=42, solver="saga" ) tuned_logreg.fit(X_train, y_train) y_pred = tuned_logreg.predict_proba(X_test)[:, 1] auc_score = roc_auc_score(y_test, y_pred) fpr, tpr, _ = roc_curve(y_test, y_pred) plt.plot(fpr, tpr, label="ROC Curve (AUC = %0.3f)" % auc_score) plt.plot([0, 1], [0, 1], linestyle="--", color="grey", label="Random Guess") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve") plt.legend() plt.show() print(f"Tuned LogisticRegression AUC Score = {auc_score:.3f}") # > 💡 | The tunings worked quite well and we've been able to achieve even higher AUC scores for our models. # # # <div style="padding:20px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">7. Ensembling Models # > 📝 | Now we're going to use Sklearn's StackingClassifier to create a meta model by ensembling the tuned CatBoost, the tuned RandomForest, and the tuned LogisticRegression all together. 
# Creating meta model ensemble_model = StackingClassifier( estimators=[ ("CatBoost", tuned_cb), ("RandomForest", tuned_rf), ("LogisticRegression", tuned_logreg), ], cv=5, ) ensemble_model.fit(X_train, y_train) y_pred = ensemble_model.predict_proba(X_test)[:, 1] auc_score = roc_auc_score(y_test, y_pred) print(f"\nEnsemble model AUC score = {auc_score:.3f}\n") fpr, tpr, _ = roc_curve(y_test, y_pred) plt.plot(fpr, tpr, label="ROC Curve (AUC = %0.3f)" % auc_score) plt.plot([0, 1], [0, 1], linestyle="--", color="grey", label="Random Guess") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve - Ensemble Model") plt.legend() plt.show() # > 💡 | We have two different models with the highest scores: # Tuned CatBoostClassifier, with an AUC score of 0.849 # Ensemble Model with an AUC score of 0.844 # # # <div style="padding:20px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">8. Making Final Predictions - Tuned CatBoost Model # Visualizing test set test test = test.set_index("id") # Removing the index and selecting 'Id' as index test = pipeline.transform(test) # Passing the test set through the pipeline y_pred = tuned_cb.predict_proba(test)[:, 1] # Performing Predictions y_pred # Loading submission dataframe submission = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv") submission # Replacing values in 'target' column by the values in y_pred submission["target"] = y_pred submission # Saving submission as csv submission.to_csv("submission.csv", index=False) # # # <div style="padding:20px; # color:white; # margin:10; # font-size:200%; # text-align:left; # display:fill; # border-radius:5px; # background-color:#191970; # overflow:hidden; # font-weight:700">9. Making Final Predictions - Ensemble Model test # Visualizing test dataframe again # Making predictions y_pred = ensemble_model.predict_proba(test)[:, 1] y_pred # Replacing values submission["target"] = y_pred submission # Saving to CSV file submission.to_csv("submission.csv", index=False)
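# Both blocks above write to the same "submission.csv", so the ensemble file
# overwrites the tuned CatBoost one. A small sketch that keeps both submissions,
# reusing the fitted models and the transformed test set from above:
submission["target"] = tuned_cb.predict_proba(test)[:, 1]
submission.to_csv("submission_catboost.csv", index=False)

submission["target"] = ensemble_model.predict_proba(test)[:, 1]
submission.to_csv("submission_ensemble.csv", index=False)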
# # Australian Bushfire - Map analyis # The Australian bushfire has led to massive loss to wildlife, forest area and has even caused human casualities, inclding firefirghters from U.S. It has even affected the air quality in nearby cities such as sydney and melbourne. We will take a look at fire data obtained from NASA satellite's MODIS and VIIRS. # What is covered - # - Regions with Highest recorded fire radiation in a day # - Dates on which bushfires were at peak # - Timeline of bushfire - barplot # - Heat map with time - for Australian bushfire # - Canbbera Fire over last 10 days # - Kangaroo island fire # Note : # - The notebook may take some time to load, load in firefox for faster results. # - Also since the loading time is high we will conly consider data for last 2 months - Dec 1,2019 to Jan 31,2020. # ## Install dependencies and set file path # dependencies import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt # plotting import seaborn as sns # for beatiful visualization import folium from folium import plugins # set file path import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) folium.__version__ folium.branca.__version__ # ## Load the data fire_nrt_m6 = pd.read_csv( "../input/australian-bush-fire-satellite-data-nasa/fire_archive_M6_101673.csv" ) fire_archive_m6 = pd.read_csv( "../input/australian-bush-fire-satellite-data-nasa/fire_archive_M6_101673.csv" ) fire_nrt_v1 = pd.read_csv( "../input/australian-bush-fire-satellite-data-nasa/fire_nrt_V1_101674.csv" ) fire_archive_v1 = pd.read_csv( "../input/australian-bush-fire-satellite-data-nasa/fire_archive_V1_101674.csv" ) type(fire_nrt_v1) # Since VIIRS provides more spatial resolution(375m), We will be using VIIRS for further visualization and analysis. # # Merge archive and nrt data # Archive data is between sept 1,2019 to dec 31,2019. # Nrt is between jan 1,2020 to jan 31,2020 # We will be merging both the data df_merged = pd.concat([fire_archive_v1, fire_nrt_v1], sort=True) data = df_merged data.head() data.info() # We will be concentrating particularly on frp(Fire radiation power) which can detect bushfires # ## Filter the data # We will consider only 4 fields - latitude,longitude,acq_date and frp (fire radiation power) dor this analysis. df_filter = data.filter(["latitude", "longitude", "acq_date", "frp"]) df_filter.head() # - **Also since most of the fire activity happened after november, and the complete data takes time to lad in this notebook, will will filter the data between Dec 1, 2019 to Jan 31, 2020** df = df_filter[df_filter["acq_date"] >= "2019-12-01"] df.head() # ## Regions with Highest recorded fire radiation in a day data_topaffected = df.sort_values(by="frp", ascending=False).head(10) data_topaffected # By reverse geocoding we can obtain the locations(Mentioned in Conclusion at the end). 
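# The note above mentions reverse geocoding to name the top-affected locations.
# A hedged sketch using geopy's Nominatim geocoder (assumes the geopy package is
# available, network access is allowed, and usage stays within the service policy):
from geopy.geocoders import Nominatim

geolocator = Nominatim(user_agent="bushfire-eda")
for _, row in data_topaffected.iterrows():
    place = geolocator.reverse((row["latitude"], row["longitude"]), language="en")
    print(
        row["acq_date"],
        round(row["frp"], 1),
        "-",
        place.address if place else "unknown location",
    )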
# **Below is the map marking the regions which were highest affected in a day** # Create a map m = folium.Map( location=[-35.0, 144], control_scale=True, zoom_start=3, attr="text some" ) df_copy = data_topaffected.copy() # loop through data to create Marker for each hospital for i in range(0, len(df_copy)): folium.Marker( location=[df_copy.iloc[i]["latitude"], df_copy.iloc[i]["longitude"]], # popup=popup, tooltip="frp: " + str(df_copy.iloc[i]["frp"]) + "<br/> date: " + str(df_copy.iloc[i]["acq_date"]), icon=folium.Icon(color="red", icon="fire", prefix="fa"), ).add_to(m) m # ## Dates on which bushfires were at peak dfdate = df[["acq_date", "frp"]].set_index("acq_date") dfdate_highest = dfdate.groupby("acq_date").sum().sort_values(by="frp", ascending=False) dfdate_highest.head(10) # ## Timeline of bushfire - barplot # - Note : this may take sometime to execute # plt.figure(figsize=(10, 5)) sns.set_palette("pastel") ax = sns.barplot(x="acq_date", y="frp", data=df) for ind, label in enumerate(ax.get_xticklabels()): if ind % 10 == 0: # every 10th label is kept label.set_visible(True) else: label.set_visible(False) ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right") plt.xlabel("Date") plt.ylabel("FRP (fire radiation power)") plt.title("time line of bushfire in Australia") plt.tight_layout() # - The above barplot represents the progress of fire from dec 1, 2019 to jan 31, 2020 # - You can notice three big spikes after 30th december, representing highest frp activity # ## Heat map with time - for Australian bushfire # Note : Play button layer might not display properly in chrome, due to ongoing issue with folium.HeatMapWithTime .**Use firefox**. from folium.plugins import HeatMapWithTime # A small function to get heat map with time given the data def getmap(ip_data, location, zoom, radius): # get day list dfmap = ip_data[["acq_date", "latitude", "longitude", "frp"]] df_day_list = [] for day in dfmap.acq_date.sort_values().unique(): df_day_list.append( dfmap.loc[ dfmap.acq_date == day, ["acq_date", "latitude", "longitude", "frp"] ] .groupby(["latitude", "longitude"]) .sum() .reset_index() .values.tolist() ) # Create a map using folium m = folium.Map(location, zoom_start=zoom, tiles="Stamen Terrain") # creating heatmap with time HeatMapWithTime( df_day_list, index=list(dfmap.acq_date.sort_values().unique()), auto_play=True, radius=radius, gradient={0.2: "blue", 0.4: "lime", 0.6: "orange", 1: "red"}, min_opacity=0.5, max_opacity=0.8, use_local_extrema=True, ).add_to(m) return m getmap(df, [-27, 132], 3.5, 3) # - The above map gives heatmap with time # - Play it at higher fps to increase speed # ## Canbbera Fire over last 10 days # Note : Play button layer might not display properly in chrome, due to ongoing issue with folium.HeatMapWithTime .**Use firefox**. # df tail for the latest data df_10days = df.tail(21500) # Using getmap function to obtain map from above, location set to canberra getmap(df_10days, [-35.6, 149.12], 8, 3) # - You can see the red spot appearing in Canberra over last 4 days, indicating fire activity # ## Kangaroo Island fire # Note : Play button layer might not display properly in chrome, due to ongoing issue with folium.HeatMapWithTime .**Use firefox**. # Using getmap function to obtain map from above, location set to kangaroo island getmap(df, [-36, 137.22], 8.5, 3)
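# The notes above mention that the HeatMapWithTime play control can misbehave in
# some browsers. One workaround is saving the folium map to a standalone HTML file
# and opening it directly in Firefox (a small sketch reusing getmap from above):
kangaroo_map = getmap(df, [-36, 137.22], 8.5, 3)
kangaroo_map.save("kangaroo_island_fire.html")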
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session data = pd.read_csv( "/kaggle/input/political-social-media-posts/political_social_media.csv", usecols=[7, 20], names=["bias", "text"], encoding="ISO-8859-1", ) data.loc[data["bias"] == "neutral", "bias"] = 1 data.loc[data["bias"] == "partisan", "bias"] = 0 data.head() import re def preprocess_text(text): # Remove URLs text = re.sub(r"http\S+", "", text) # Remove mentions text = re.sub(r"@\w+", "", text) # Remove hashtags text = re.sub(r"#\w+", "", text) # Remove special characters and digits text = re.sub(r"[^a-zA-Z\s]", "", text) # Convert to lowercase text = text.lower() return text # Apply the preprocessing function to the 'text' column data["cleaned_text"] = data["text"].apply(preprocess_text) # Display the first few rows of the dataset with the cleaned text data.head() text = data["cleaned_text"].to_numpy() X = text[1:] y = data["bias"].values y = y[1:] print(X[:2]) print(y[:2]) print(np.unique(y_train)) from sklearn.feature_extraction.text import CountVectorizer from sklearn.model_selection import train_test_split # Split the dataset into train, validation, and test sets X_train, X_temp, y_train, y_temp = train_test_split( X, y, test_size=0.2, random_state=42 ) X_val, X_test, y_val, y_test = train_test_split( X_temp, y_temp, test_size=0.5, random_state=42 ) # Create the vectorizer cv = CountVectorizer() # Fit the vectorizer on the training data X_train_cv = cv.fit_transform(X_train) # Transform the validation and test data using the vectorizer X_val_cv = cv.transform(X_val) X_test_cv = cv.transform(X_test) y_train = np.array(y_train, dtype=int) y_test = np.array(y_test, dtype=int) # first I will try to use the logistic regression classifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score # instantiate the classifier log_reg_classifier = LogisticRegression(max_iter=1000) log_reg_classifier.fit(X_train_cv, y_train) # Make predictions on the test set y_test_pred_log_reg = log_reg_classifier.predict(X_test_cv) # Calculate and report the accuracy log_reg_accuracy = accuracy_score(y_test, y_test_pred_log_reg) print(f"Logistic Regression accuracy: {log_reg_accuracy:.4f}") from sklearn.svm import SVC # Instantiate the SVM classifier svm_classifier = SVC() # Train the classifier svm_classifier.fit(X_train_cv, y_train) # Make predictions on the test set y_test_pred_svm = svm_classifier.predict(X_test_cv) # Calculate and report the accuracy svm_accuracy = accuracy_score(y_test, y_test_pred_svm) print(f"SVM accuracy: {svm_accuracy:.4f}") from sklearn.naive_bayes import MultinomialNB # Instantiate the Naive Bayes classifier nb_classifier = MultinomialNB() # Train the classifier nb_classifier.fit(X_train_cv, y_train) # Make predictions on the test set y_test_pred_nb = nb_classifier.predict(X_test_cv) # Calculate and report the accuracy nb_accuracy = accuracy_score(y_test, 
y_test_pred_nb) print(f"Naive Bayes accuracy: {nb_accuracy:.4f}") from sklearn.metrics import precision_score, recall_score, f1_score def print_metrics(y_true, y_pred, classifier_name): precision = precision_score(y_true, y_pred) recall = recall_score(y_true, y_pred) f1 = f1_score(y_true, y_pred) print(f"{classifier_name} Precision: {precision:.4f}") print(f"{classifier_name} Recall: {recall:.4f}") print(f"{classifier_name} F1-score: {f1:.4f}") print() # SVM Metrics print_metrics(y_test, y_test_pred_svm, "SVM") # Naive Bayes Metrics print_metrics(y_test, y_test_pred_nb, "Naive Bayes") # logistic Regression Metrics print_metrics(y_test, y_test_pred_log_reg, "Logistic Regression")
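# The validation split (X_val_cv, y_val) created earlier is never used. A hedged
# sketch of using it to choose the regularization strength C for the logistic
# regression before reporting the final test accuracy:
y_val_int = np.array(y_val, dtype=int)

best_c, best_val_acc = None, 0.0
for c in [0.01, 0.1, 1.0, 10.0]:
    clf = LogisticRegression(C=c, max_iter=1000)
    clf.fit(X_train_cv, y_train)
    val_acc = accuracy_score(y_val_int, clf.predict(X_val_cv))
    print(f"C={c}: validation accuracy = {val_acc:.4f}")
    if val_acc > best_val_acc:
        best_c, best_val_acc = c, val_acc

best_log_reg = LogisticRegression(C=best_c, max_iter=1000)
best_log_reg.fit(X_train_cv, y_train)
print(
    f"Test accuracy with C={best_c}: "
    f"{accuracy_score(y_test, best_log_reg.predict(X_test_cv)):.4f}"
)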
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # ### Read in data # Read in training data train = pd.read_csv("/kaggle/input/chapman-cs530-redwinequality/train.csv") train.head() # Read in testing data test = pd.read_csv("/kaggle/input/chapman-cs530-redwinequality/test.csv") test.head() # ### Create submissions # Read in sample submissions sample_submission = pd.read_csv( "/kaggle/input/chapman-cs530-redwinequality/sample_submission.csv" ) sample_submission.head() # Create a dummy submission that has entries as many as the test set. y_pred = ( np.random.rand(test.shape[0]) * 10 ) # Create random numbers from 0-10 as dummy solution sample_submission.loc[ :, "Predicted" ] = y_pred # Change the Predicted column to your prediction sample_submission.head() sample_submission.to_csv( "your_submission.csv", header=True, index=False ) # Save the header but not the index
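# The random numbers above are only a placeholder. A hedged baseline sketch,
# assuming the training set has a 'quality' target column and that the remaining
# feature columns are shared with the test set:
from sklearn.ensemble import RandomForestRegressor

feature_cols = [c for c in train.columns if c != "quality" and c in test.columns]
rf = RandomForestRegressor(n_estimators=200, random_state=42)
rf.fit(train[feature_cols], train["quality"])

sample_submission.loc[:, "Predicted"] = rf.predict(test[feature_cols])
sample_submission.to_csv("your_submission.csv", header=True, index=False)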
import os import warnings import numpy as np import pandas as pd from sklearn.model_selection import StratifiedKFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import log_loss warnings.filterwarnings("ignore") # ## Data Preprocessing tourney_result = pd.read_csv( "../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyDetailedResults.csv" ) tourney_seed = pd.read_csv( "../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv" ) tourney_result = pd.merge( tourney_result, tourney_seed, left_on=["Season", "WTeamID"], right_on=["Season", "TeamID"], how="left", ) tourney_result.rename(columns={"Seed": "Seed1"}, inplace=True) tourney_result = tourney_result.drop("TeamID", axis=1) tourney_result = pd.merge( tourney_result, tourney_seed, left_on=["Season", "LTeamID"], right_on=["Season", "TeamID"], how="left", ) tourney_result.rename(columns={"Seed": "Seed2"}, inplace=True) tourney_result = tourney_result.drop("TeamID", axis=1) tourney_result def get_seed(x): return int(x[1:3]) tourney_result["Seed1"] = tourney_result["Seed1"].map(lambda x: get_seed(x)) tourney_result["Seed2"] = tourney_result["Seed2"].map(lambda x: get_seed(x)) tourney_result season_result = pd.read_csv( "../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonDetailedResults.csv" ) season_result.columns season_win_result = season_result[ [ "Season", "WTeamID", "WScore", "WFGM", "WFGA", "WFGM3", "WFGA3", "WFTM", "WFTA", "WOR", "WDR", "WAst", "WTO", "WStl", "WBlk", "WPF", "DayNum", "NumOT", ] ] season_lose_result = season_result[ [ "Season", "LTeamID", "LScore", "LFGM", "LFGA", "LFGM3", "LFGA3", "LFTM", "LFTA", "LOR", "LDR", "LAst", "LTO", "LStl", "LBlk", "LPF", "DayNum", "NumOT", ] ] season_win_result.rename( columns={ "WTeamID": "TeamID", "WScore": "Score", "WFGM": "FGM", "WFGA": "FGA", "WDR": "DR", "WFGA3": "FGA3", "WFGM3": "FGM3", "WFTM": "FTM", "WFTA": "FTA", "WOR": "OR", "WAst": "Ast", "WTO": "TO", "WStl": "Stl", "WBlk": "Blk", "WPF": "PF", }, inplace=True, ) season_lose_result.rename( columns={ "LTeamID": "TeamID", "LScore": "Score", "LFGM": "FGM", "LFGA": "FGA", "LDR": "DR", "LFGA3": "FGA3", "LFGM3": "FGM3", "LFTM": "FTM", "LFTA": "FTA", "LOR": "OR", "LAst": "Ast", "LTO": "TO", "LStl": "Stl", "LBlk": "Blk", "LPF": "PF", }, inplace=True, ) season_result = pd.concat((season_win_result, season_lose_result)).reset_index( drop=True ) season_result season_score = ( season_result.groupby(["Season", "TeamID"])[ [ "Score", "FGM", "FGA", "DR", "FGA3", "FGM3", "FTM", "FTA", "OR", "Ast", "TO", "Stl", "Blk", "PF", ] ] .sum() .reset_index() ) season_score tourney_result = pd.merge( tourney_result, season_score, left_on=["Season", "WTeamID"], right_on=["Season", "TeamID"], how="left", ) tourney_result.rename(columns={"Score": "ScoreT1"}, inplace=True) tourney_result = tourney_result.drop("TeamID", axis=1) tourney_result = pd.merge( tourney_result, season_score, left_on=["Season", "LTeamID"], right_on=["Season", "TeamID"], how="left", ) tourney_result.rename(columns={"Score": "ScoreT2"}, inplace=True) tourney_result = tourney_result.drop("TeamID", axis=1) tourney_result columns = tourney_result.columns columns columns = [ "WTeamID", "LTeamID", "Seed1", "Seed2", "ScoreT1", "FGM_x", "FGA_x", "DR_x", "FGA3_x", "FGM3_x", "FTM_x", "FTA_x", "OR_x", "Ast_x", "TO_x", "Stl_x", "Blk_x", "PF_x", "ScoreT2", "FGM_y", "FGA_y", "DR_y", "FGA3_y", "FGM3_y", "FTM_y", "FTA_y", 
"OR_y", "Ast_y", "TO_y", "Stl_y", "Blk_y", "PF_y", ] tourney_win_result = tourney_result[columns].copy() tourney_loss_result = tourney_result[columns].copy() tourney_loss_result["Seed1"] = tourney_win_result["Seed2"] tourney_loss_result["Seed2"] = tourney_win_result["Seed1"] tourney_loss_result["ScoreT1"] = tourney_win_result["ScoreT2"] tourney_loss_result["ScoreT2"] = tourney_win_result["ScoreT1"] for c in [ "FGM", "FGA", "DR", "FGA3", "FGM3", "FTM", "FTA", "OR", "Ast", "TO", "Stl", "Blk", "PF", ]: tourney_loss_result[c + "_x"] = tourney_win_result[c + "_y"] tourney_loss_result[c + "_y"] = tourney_win_result[c + "_x"] tourney_win_result["result"] = 1 tourney_loss_result["result"] = 0 tourney_result = pd.concat((tourney_win_result, tourney_loss_result)).reset_index( drop=True ) for c in [ "FGM", "FGA", "DR", "FGA3", "FGM3", "FTM", "FTA", "OR", "Ast", "TO", "Stl", "Blk", "PF", ]: tourney_result[c + "diff"] = tourney_result[c + "_y"] - tourney_result[c + "_x"] tourney_result["result"] tourney_result["Seed_diff"] = tourney_result["Seed1"] - tourney_result["Seed2"] tourney_result["ScoreT_diff"] = tourney_result["ScoreT1"] - tourney_result["ScoreT2"] tourney_result test_df = pd.read_csv( "../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv" ) test_df["Season"] = test_df["ID"].map(lambda x: int(x[:4])) test_df["WTeamID"] = test_df["ID"].map(lambda x: int(x[5:9])) test_df["LTeamID"] = test_df["ID"].map(lambda x: int(x[10:14])) test_df = pd.merge( test_df, tourney_seed, left_on=["Season", "WTeamID"], right_on=["Season", "TeamID"], how="left", ) test_df.rename(columns={"Seed": "Seed1"}, inplace=True) test_df = test_df.drop("TeamID", axis=1) test_df = pd.merge( test_df, tourney_seed, left_on=["Season", "LTeamID"], right_on=["Season", "TeamID"], how="left", ) test_df.rename(columns={"Seed": "Seed2"}, inplace=True) test_df = test_df.drop("TeamID", axis=1) test_df = pd.merge( test_df, season_score, left_on=["Season", "WTeamID"], right_on=["Season", "TeamID"], how="left", ) test_df.rename(columns={"Score": "ScoreT1"}, inplace=True) test_df = test_df.drop("TeamID", axis=1) test_df = pd.merge( test_df, season_score, left_on=["Season", "LTeamID"], right_on=["Season", "TeamID"], how="left", ) test_df.rename(columns={"Score": "ScoreT2"}, inplace=True) test_df = test_df.drop("TeamID", axis=1) test_df["Seed1"] = test_df["Seed1"].map(lambda x: get_seed(x)) test_df["Seed2"] = test_df["Seed2"].map(lambda x: get_seed(x)) for c in [ "FGM", "FGA", "DR", "FGA3", "FGM3", "FTM", "FTA", "OR", "Ast", "TO", "Stl", "Blk", "PF", ]: test_df[c + "diff"] = test_df[c + "_y"] - test_df[c + "_x"] test_df["Seed_diff"] = test_df["Seed1"] - test_df["Seed2"] test_df["ScoreT_diff"] = test_df["ScoreT1"] - test_df["ScoreT2"] test_df = test_df.drop(["ID", "Pred", "Season"], axis=1) test_df MAX_EMBINT = max(tourney_result.WTeamID.unique()) + 1 scaler = StandardScaler() icolumns = [ "LTeamID", "WTeamID", "Seed2", "Seed1", "ScoreT2", "FGM_y", "FGA_y", "DR_y", "FGA3_y", "FGM3_y", "FTM_y", "FTA_y", "OR_y", "Ast_y", "TO_y", "Stl_y", "Blk_y", "PF_y", "ScoreT1", "FGM_x", "FGA_x", "DR_x", "FGA3_x", "FGM3_x", "FTM_x", "FTA_x", "OR_x", "Ast_x", "TO_x", "Stl_x", "Blk_x", "PF_x", ] X, y = tourney_result[columns].values, tourney_result["result"].values X_test = test_df[columns].values X_itest = test_df[icolumns].values X[:, 2:] = scaler.fit_transform(X[:, 2:]) X_test[:, 2:] = scaler.transform(X_test[:, 2:]) X_itest[:, 2:] = scaler.transform(X_itest[:, 2:]) # ## Keras Model import tensorflow as tf 
import tensorflow_addons as tfa def mish(x): return x * tf.keras.backend.softplus(tf.keras.backend.tanh(x)) def get_model(): feature_inp = tf.keras.layers.Input((30,), name="FeatureInput") id1_inp = tf.keras.layers.Input((1,), name="ID1Input") id2_inp = tf.keras.layers.Input((1,), name="ID2Input") emb = tf.keras.layers.Embedding(MAX_EMBINT, 2, input_length=1) e1 = tf.keras.layers.Flatten()(emb(id1_inp)) e2 = tf.keras.layers.Flatten()(emb(id2_inp)) e1 = tf.keras.layers.Dropout(0.5)(e1) e2 = tf.keras.layers.Dropout(0.5)(e2) x = tf.keras.layers.Dense(128, activation="relu")(feature_inp) x = tf.keras.layers.Dropout(0.5)(x) x = tf.keras.layers.Dense(256, activation="relu")(x) x = tf.keras.layers.Dropout(0.5)(x) e = tf.keras.layers.Concatenate()([e1, e2]) e = tf.keras.layers.Dense(32, activation="relu")(e) e = tf.keras.layers.Dropout(0.5)(e) x = tf.keras.layers.Concatenate()([x, e]) x = tf.keras.layers.Dense(256, activation="relu")(x) x = tf.keras.layers.Dropout(0.5)(x) x = tf.keras.layers.Dense(128, activation="relu")(x) x = tf.keras.layers.Dropout(0.5)(x) x = tf.keras.layers.Dense(1, activation="sigmoid")(x) model = tf.keras.Model([feature_inp, id1_inp, id2_inp], x) model.compile( optimizer=tfa.optimizers.RectifiedAdam(lr=3e-3), loss="binary_crossentropy", metrics=["binary_crossentropy"], ) return model cv = StratifiedKFold(n_splits=20, shuffle=True) losses = [] nn_predicts = [] for i, (train_ind, valid_ind) in enumerate(cv.split(X, y)): tf.keras.backend.clear_session() X_train, X_valid = X[train_ind], X[valid_ind] y_train, y_valid = y[train_ind], y[valid_ind] model = get_model() if i == 0: print(model.summary()) er = tf.keras.callbacks.EarlyStopping( monitor="val_loss", patience=25, restore_best_weights=True ) model.fit( [X_train[:, 2:], X_train[:, 0].astype("int32"), X_train[:, 1].astype("int32")], y_train, epochs=256, batch_size=64, validation_data=[ [ X_valid[:, 2:], X_valid[:, 0].astype("int32"), X_valid[:, 1].astype("int32"), ], y_valid, ], verbose=0, callbacks=[er], ) preds = model.predict( [X_valid[:, 2:], X_valid[:, 0].astype("int32"), X_valid[:, 1].astype("int32")] ) print(f"Fold {i}: {log_loss(y_valid, preds)}") test_pred = ( 0.5 * model.predict( [X_test[:, 2:], X_test[:, 0].astype("int32"), X_test[:, 1].astype("int32")] ) + 0.5 - 0.5 * model.predict( [ X_itest[:, 2:], X_itest[:, 0].astype("int32"), X_itest[:, 1].astype("int32"), ] ) ) nn_predicts.append(test_pred) # Take the average probabilty on 5 folds nn_predicts = np.asarray(predicts) nn_predicts = np.mean(nn_predicts, axis=0) import seaborn as sns sns.distplot(nn_predicts) submission_df = pd.read_csv( "../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv" ) submission_df["Pred"] = nn_predicts submission_df.to_csv("submission.csv", index=False)
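# Log loss punishes confident mistakes heavily, so probabilities are often clipped
# away from 0 and 1 before submission. A hedged sketch applied to the averaged fold
# predictions (the clip bounds are an assumption, not tuned here):
clipped_preds = np.clip(nn_predicts, 0.025, 0.975)
submission_df["Pred"] = clipped_preds
submission_df.to_csv("submission_clipped.csv", index=False)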
# ## Exploratory Data Analysis on Corona Virus # ## What is a Corona Virus? # As listed on WHO website, Coronaviruses (CoV) are a large family of viruses that cause illness ranging from the common cold to more severe diseases such as Middle East Respiratory Syndrome (MERS-CoV) and Severe Acute Respiratory Syndrome (SARS-CoV). A novel coronavirus (nCoV) is a new strain that has not been previously identified in humans. # Common signs of infection include respiratory symptoms, fever, cough, shortness of breath and breathing difficulties. In more severe cases, infection can cause pneumonia, severe acute respiratory syndrome, kidney failure and even death. # ## Objective: # Since we see that outbreak of Corona Virus is increasing Day by day, we can explore trends from the given data and try to predict future. # ## Dataset Source: https://www.kaggle.com/sudalairajkumar/novel-corona-virus-2019-dataset # ## Exploratory Data Analysis # Let's perform EDA on the dataset. # importing all necessary libraries import pandas as pd import numpy as np from datetime import date import matplotlib.pyplot as plt from matplotlib.ticker import StrMethodFormatter import seaborn as sns import plotly.express as px import matplotlib.pyplot as plt import pycountry import plotly.graph_objects as go # Reading the dataset coronaVirus_df = pd.read_csv( "/kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv", index_col="Last Update", parse_dates=["Last Update"], ) coronaVirus_df.tail() coronaVirus_df.shape # ### Data Cleaning and Transformation # 1. Check for missing values and filling missing values # 2. Change data type for Last Update column and modify other columns if required. # 3. Remove 'Sno' column as it is not required. # Checking missing values and transforming data coronaVirus_df.isnull().values.any() coronaVirus_df.isnull().sum() # replacing null values in Province/State with Country names coronaVirus_df["Province/State"].fillna(coronaVirus_df["Country"], inplace=True) coronaVirus_df.drop(["Sno"], axis=1, inplace=True) # creating new columns for date, month and time which would be helpful for furthur computation coronaVirus_df["year"] = pd.DatetimeIndex(coronaVirus_df["Date"]).year coronaVirus_df["month"] = pd.DatetimeIndex(coronaVirus_df["Date"]).month coronaVirus_df["time"] = pd.DatetimeIndex(coronaVirus_df["Date"]).time coronaVirus_df.head() # > ### Latest Update on number of confirmed, reported and deaths across the globe**** # We are trying to analyze number of cases reported. # A look at the different cases - confirmed, death and recovered print("Globally Confirmed Cases: ", coronaVirus_df["Confirmed"].sum()) print("Global Deaths: ", coronaVirus_df["Deaths"].sum()) print("Globally Recovered Cases: ", coronaVirus_df["Recovered"].sum()) # ![](http://)It is seen that total of 123360 confirmed cases have been reported, 2646 deaths have been confirmed and 3284 people have sucessfully fought the virus and are showing signs of recovery. The data is from 22nd Jan to 4th Feb 2020. # It is important to analyze latest scenario as per the last update so that we can predict numbers in future. coronaVirus_df[["Confirmed", "Deaths", "Recovered"]].sum().plot(kind="bar") # ### Geographical Widespread of CoronaVirus # Using the given data, Here are few questions which we are going to answer # 1. Total number of countries whch are affected by the virus # 2. Number of confirmed, recovered, deaths cases reported Country wise # 2. Number of confirmed cases reported State/Province wise # 3. 
Top 5 Affected Countries # 4. Top 5 countries which are unaffected. # 5. Distribution of virus in India and US population. # Total Number Of countries which are affected by the virus coronaVirus_df.Country.unique() # Number of confirmed cases reported Country wise global_confirmed_cases = coronaVirus_df.groupby("Country").sum().Confirmed global_confirmed_cases.sort_values(ascending=False) global_death_cases = coronaVirus_df.groupby("Country").sum().Deaths global_death_cases.sort_values(ascending=False) global_recovered_cases = coronaVirus_df.groupby("Country").sum().Recovered global_recovered_cases.sort_values(ascending=False) # plotting graphs for total Confirmed, Death and Recovery cases plt.rcParams["figure.figsize"] = (12, 9) ax1 = coronaVirus_df[["Date", "Confirmed"]].groupby(["Date"]).sum().plot() ax1.set_ylabel("Total Number of Confirmed Cases") ax1.set_xlabel("Date") ax2 = coronaVirus_df[["Date", "Deaths", "Recovered"]].groupby(["Date"]).sum().plot() ax2.set_ylabel("Recovered and Deaths Cases") ax2.set_xlabel("Date") fig = px.scatter_matrix(coronaVirus_df, dimensions=["Confirmed"], color="Date") fig.show() fig = px.scatter_matrix( coronaVirus_df, dimensions=["Recovered", "Deaths"], color="Date" ) fig.show() # Let's look the various Provinces/States affected data_countryprovince = coronaVirus_df.groupby(["Country", "Province/State"]).sum() data_countryprovince.sort_values(by="Confirmed", ascending=False) # Top Affected countries top_affected_countries = global_confirmed_cases.sort_values(ascending=False) top_affected_countries.head(5) # Finding countries which are relatively safe due to less number of reported cases top_unaffected_countries = global_confirmed_cases.sort_values(ascending=True) top_unaffected_countries.head(5) # Above list are unaffected countries which means that relative to other countries, there are very less number of cases reported. These countries should take all measures to prevent spreading the virus. # ### Plotting cases confirmed in China # Mainland China China_data = coronaVirus_df[coronaVirus_df["Country"] == "China"] China_data x = China_data.groupby("Province/State")["Confirmed"].sum().sort_values().tail(15) x.plot(kind="barh", color="#86bf91") plt.xlabel("Confirmed case Count", labelpad=14) plt.ylabel("States/Province", labelpad=14) plt.title("Confirmed cases count in China states", y=1.02) # 1. > ### ****Geographical Distribution in India and US **** # > Now let's understand distribution of virus in US population US_data = coronaVirus_df[coronaVirus_df["Country"] == "US"] US_data x = ( US_data.groupby("Province/State")["Confirmed"] .sum() .sort_values(ascending=False) .tail(20) ) x x.plot(kind="barh", color="#86bf91") plt.xlabel("Confirmed case Count", labelpad=14) plt.ylabel("States", labelpad=14) plt.title("Confirmed cases count in US states", y=1.02) India_data = coronaVirus_df[coronaVirus_df["Country"] == "India"] India_data import plotly.express as px # India_data = px.data.gapminder().query("country == 'India'") fig = px.bar(US_data, x="Province/State", y="Confirmed") fig.show() # ## Time Series Analysis # It is important to understand correlation of time and cases reported. 
# Using plotly.express import plotly.express as px import pandas as pd fig = px.line(coronaVirus_df, x="Date", y="Confirmed") fig.show() fig = px.line(coronaVirus_df, x="Date", y="Deaths") fig.show() import pandas as pd import plotly.graph_objects as go fig = go.Figure() fig.add_trace( go.Scatter( x=coronaVirus_df["Date"], y=coronaVirus_df["Confirmed"], name="Confirmed", line_color="deepskyblue", opacity=0.8, ) ) fig.add_trace( go.Scatter( x=coronaVirus_df["Date"], y=coronaVirus_df["Recovered"], name="Recovered", line_color="dimgray", opacity=0.8, ) ) fig.add_trace( go.Scatter( x=coronaVirus_df["Date"], y=coronaVirus_df["Deaths"], name="Deaths", line_color="red", opacity=0.8, ) ) # Use date string to set xaxis range fig.update_layout( xaxis_range=["2020-01-22", "2020-02-03"], title_text="Cases over time" ) fig.show() import pandas as pd import plotly.graph_objects as go fig = go.Figure() fig.add_trace( go.Scatter( x=coronaVirus_df["Date"], y=coronaVirus_df["Recovered"], name="Recovered", line_color="deepskyblue", opacity=0.8, ) ) fig.add_trace( go.Scatter( x=coronaVirus_df["Date"], y=coronaVirus_df["Deaths"], name="Deaths", line_color="red", opacity=0.8, ) ) # Use date string to set xaxis range fig.update_layout( xaxis_range=["2020-01-22 00:00:00", "2020-02-03 23:59:59"], title_text="Recovered vs Deaths over time in China", ) fig.show() import pandas as pd import plotly.graph_objects as go fig = go.Figure() fig.add_trace( go.Scatter( x=coronaVirus_df.time, y=coronaVirus_df["Confirmed"], name="Confirmed", line_color="deepskyblue", opacity=0.8, ) ) # Use date string to set xaxis range fig.update_layout( xaxis_range=["2020-01-31", "2020-02-03"], title_text="Confirmed Cases over time" ) fig.show()
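# Note that the line charts above are drawn from the raw rows, so every date contributes one
# point per province/state. A minimal sketch (assuming the same coronaVirus_df columns as above)
# that first aggregates to one total per day, which makes the trend easier to read:
daily_totals = (
    coronaVirus_df.groupby("Date")[["Confirmed", "Deaths", "Recovered"]]
    .sum()
    .reset_index()
)
fig = px.line(
    daily_totals, x="Date", y="Confirmed", title="Total confirmed cases per day"
)
fig.show()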
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # # READ THE DATA df = pd.read_csv("/kaggle/input/tmdb-movie-metadata/tmdb_5000_movies.csv") df.head() df["original_language"] = df["original_language"].apply(lambda x: 1 if x == "en" else 0) df["original_language"].unique() df.rename(columns={"original_language": "English"}, inplace=True) # # Figuring out how to accept string data as lists in python a = df["production_countries"][0] a import ast a = ast.literal_eval(a) a df["release_date"] = df["release_date"].fillna("1992-09-04") df["release_date"].isna().sum() df["release_date"] = pd.to_datetime(df["release_date"]) df["release_date"] = df["release_date"].apply(lambda x: int(x.year)) df["release_date"].head() df # # DROP NO ESSENTIAL FEATURES df.drop( [ "homepage", "id", "keywords", "original_title", "overview", "status", "tagline", "title", "English", ], axis=1, inplace=True, ) df["production_companies"] = df["production_companies"].apply( lambda x: ast.literal_eval(x) ) df["production_companies"] = df["production_companies"].apply(lambda x: len(x)) df["production_companies"].head() df["genres"] = df["genres"].apply(lambda x: ast.literal_eval(x)) df["genres"] = df["genres"].apply(lambda x: len(x)) df["genres"].head() # # Some Data needs to be length of a list instead of whole list df["production_countries"] = df["production_countries"].apply( lambda x: ast.literal_eval(x) ) df["production_countries"] = df["production_countries"].apply(lambda x: len(x)) df["production_countries"].head() df["spoken_languages"] = df["spoken_languages"].apply(lambda x: ast.literal_eval(x)) df["spoken_languages"] = df["spoken_languages"].apply(lambda x: len(x)) df["spoken_languages"].head() # # RENAMING SOME COLUMNS # df.rename(columns={"spoken_languages": "Number of spoken_languages"},inplace=True) # df.rename(columns={"production_countries": "Number of countries produced in"},inplace=True) # df.rename(columns={"production_companies": "Number of producers"},inplace=True) # # Filling nan values and changing datatypes df["runtime"] = df["runtime"].fillna(df["runtime"].mean()) df["popularity"] = df["popularity"].apply(lambda x: int(x)) df["runtime"] = df["runtime"].apply(lambda x: int(x)) df # # REPLACING 0s df["production_companies"] = df["production_companies"].replace(0, 1) df["production_countries"] = df["production_countries"].replace(0, 1) quant = 0.0156 df["revenue"] = df["revenue"].replace(0, df["revenue"].quantile(quant)) df["budget"] = df["budget"].replace(0, df["budget"].quantile(quant)) df["popularity"] = df["popularity"].replace(0, df["popularity"].quantile(quant)) df["runtime"] = df["runtime"].replace(0, df["runtime"].quantile(quant)) df["spoken_languages"] = df["spoken_languages"].replace(0, 1) # df.drop(['runtime'], axis=1,inplace=True) df.info() df.columns # # Creating features and target label X = df.drop(["revenue"], axis=1) y = df["revenue"] df.describe() # # Scaling the data from sklearn.preprocessing import MaxAbsScaler scaler = MaxAbsScaler() X = scaler.fit_transform(X) # y=scaler.fit_transform(y) # # Trying PCA # import matplotlib.pyplot as plt # from 
sklearn.decomposition import PCA
# pca = PCA()
# principalComponents = pca.fit_transform(X)
# plt.figure()
# plt.plot(np.cumsum(pca.explained_variance_ratio_))
# plt.xlabel('Number of Components')
# plt.ylabel('Variance (%)') #for each component
# plt.title('Explained Variance')
# plt.show()
# pca = PCA(n_components=5)
# X = pca.fit_transform(X)
# # Importing the regression Model
from sklearn.ensemble import RandomForestRegressor as forest

clf = forest(max_depth=40, max_features=0.4, n_estimators=45, random_state=42)
# # Splitting the data for validation after training
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=42
)
# # Training and evaluating the model
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# # Saving pickles
import pickle

filename = "RandomForest_model.pickle"
pickle.dump(clf, open(filename, "wb"))
filename_scaler = "scaler_model.pickle"
pickle.dump(scaler, open(filename_scaler, "wb"))
# # HYPER PARAMETER TUNING
##Hyper parameter tuning
# n_estimators = [int(x) for x in np.linspace(start = 40, stop = 120, num = 10)]
# # Number of features to consider at every split
# max_features = ['auto', 'sqrt']
# # Maximum number of levels in tree
# max_depth = [int(x) for x in np.linspace(10, 75, num = 10)]
# max_depth.append(None)
# # Minimum number of samples required to split a node
# min_samples_split = [2, 5, 10]
# # Minimum number of samples required at each leaf node
# min_samples_leaf = [1, 2, 4]
# random_grid = {'n_estimators': n_estimators,
#                'max_features': max_features,
#                'max_depth': max_depth,
#                'min_samples_split': min_samples_split,
#                'min_samples_leaf': min_samples_leaf,
#               }
# from sklearn.model_selection import GridSearchCV
# grid_search = GridSearchCV(estimator=clf, param_grid=random_grid, cv=2, n_jobs=-1, verbose=3)
# grid_search.fit(X_train, y_train)
# grid_search.best_params_
# Grid-search log output:
# [Parallel(n_jobs=-1)]: Using backend LokyBackend with 4 concurrent workers.
# [Parallel(n_jobs=-1)]: Done 24 tasks | elapsed: 49.3s
# [Parallel(n_jobs=-1)]: Done 120 tasks | elapsed: 6.5min
# [Parallel(n_jobs=-1)]: Done 280 tasks | elapsed: 15.5min
# [Parallel(n_jobs=-1)]: Done 504 tasks | elapsed: 26.2min
# [Parallel(n_jobs=-1)]: Done 792 tasks | elapsed: 33.7min
# /opt/conda/lib/python3.6/site-packages/joblib/externals/loky/process_executor.py:706: UserWarning: A worker stopped while some jobs were given to the executor. This can be caused by a too short worker timeout or by a memory leak.
#   "timeout or by a memory leak.", UserWarning
# [Parallel(n_jobs=-1)]: Done 1144 tasks | elapsed: 53.7min
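# The commented-out grid search above took close to an hour. As a lighter-weight alternative
# (a sketch of an option, not what the original author ran), RandomizedSearchCV samples a fixed
# number of parameter combinations instead of trying them all; the ranges below are assumptions
# mirroring the grid above.
from sklearn.model_selection import RandomizedSearchCV

param_dist = {
    "n_estimators": [40, 60, 80, 100, 120],
    "max_depth": [10, 25, 40, 60, None],
    "max_features": [0.4, "sqrt"],
    "min_samples_split": [2, 5, 10],
}
random_search = RandomizedSearchCV(
    estimator=clf,
    param_distributions=param_dist,
    n_iter=10,  # only 10 sampled combinations instead of the full grid
    cv=2,
    n_jobs=-1,
    random_state=42,
)
# random_search.fit(X_train, y_train)  # uncomment to run; left off here like the grid search above
# print(random_search.best_params_)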
# ### Content Based Recommendation System
import pandas as pd

df = pd.read_csv("../input/content-based-recomm-sys/movies_metadata.csv")
import os

os.listdir("../input")
df.head()
# Fill missing taglines before concatenating (the result has to be assigned back for it to stick)
df["tagline"] = df["tagline"].fillna("")
df["description"] = df["overview"] + df["tagline"]
# df['description'] = df['description'].fillna('')
df.shape
df.dropna(subset=["description"], inplace=True)
df.drop_duplicates(subset=["title"], inplace=True)
df.shape
# Reset to a 0..n-1 index so that row positions line up with the similarity matrix built below
df = df.reset_index(drop=True)
from sklearn.feature_extraction.text import TfidfVectorizer

tf = TfidfVectorizer(
    analyzer="word", ngram_range=(1, 3), min_df=0, stop_words="english"
)
tfidf_matrix = tf.fit_transform(df["description"])
print(tfidf_matrix)
tfidf_matrix.shape
from sklearn.metrics.pairwise import linear_kernel

cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)
cosine_similarities.shape
cosine_similarities[0].shape
titles = df["title"]
indices = pd.Series(df.index, index=df["title"])


def recommend(title):
    idx = indices[title]
    sim_scores = list(enumerate(cosine_similarities[idx]))
    sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
    sim_scores = sim_scores[1:31]
    movie_indices = [i[0] for i in sim_scores]
    return titles.iloc[movie_indices]


recommend("The Godfather").head(10)
recommend("The Dark Knight Rises").head(10)
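# recommend() raises a KeyError when the requested title is not in the catalogue.
# A small convenience wrapper (my addition, not part of the original notebook):
def recommend_safe(title, top_n=10):
    # indices is keyed by title, so membership can be checked before looking it up
    if title not in indices:
        print(f"'{title}' was not found in the dataset")
        return pd.Series(dtype=object)
    return recommend(title).head(top_n)


recommend_safe("The Godfather")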
# # Session 3: Exploratory Data Analysis # First we import relevant libraries. import pandas as pd import numpy as np import matplotlib # Then we load the file as a data table into the variable `df`. df = pd.read_csv("../input/salary-dataset/salary_dataset.csv") # We view the first five records (through `df.head()`) to get a sense of what data (and specifically, data types) are in the table. df.head()
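# A few generic follow-up checks that usually come next in an EDA pass
# (a sketch only; it does not assume any particular column names in salary_dataset.csv):
df.info()  # column dtypes and non-null counts
df.describe()  # summary statistics for the numeric columns
df.isna().sum()  # missing values per column
df.hist(figsize=(10, 6))  # quick distribution plot for every numeric column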
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. from sklearn import ( feature_extraction, linear_model, model_selection, preprocessing, metrics, ) import pandas as pd sample_submission = pd.read_csv("../input/nlp-getting-started/sample_submission.csv") test = pd.read_csv("../input/nlp-getting-started/test.csv") train = pd.read_csv("../input/nlp-getting-started/train.csv") def model_evaluator(model, train_data, target_data, test_data): model.fit(train_data, target_data) score = model_selection.cross_val_score( model, train_data, target_data, cv=3, scoring="f1" ) return score # exploring data train.head() count_vectorizer = feature_extraction.text.CountVectorizer() clf = linear_model.RidgeClassifier() train_vectors = count_vectorizer.fit_transform(train.text[:]) target_vectors = train.target[:] model_selection.cross_val_score( clf, X=train_vectors, y=target_vectors, cv=3, scoring="f1" ) clf = linear_model.RidgeClassifier() train_vectors = count_vectorizer.fit_transform(train.text[:]) target_vectors = train.target[:] test_vectors = count_vectorizer.transform(test.text[:]) clf.fit(train_vectors, target_vectors) test_preds = clf.predict(test_vectors) a = pd.DataFrame({"id": test.id, "target": test_preds}) a.to_csv("submission.csv", index=False) train_vectors.sum(axis=1)[:10] print(train.text[1]) tfidf = feature_extraction.text.TfidfTransformer() train_tfidf = tfidf.fit_transform(train_vectors) test_tfidf = tfidf.transform(test_vectors) a = model_evaluator(clf, train_vectors, target_vectors, test_vectors) print("cross validation score for count_vectorized data:", a.mean()) a = model_evaluator(clf, train_tfidf, target_vectors, test_tfidf) print("cross validation score for TF_IDF data:", a.mean())
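# The TF-IDF features above are built in two steps (CountVectorizer followed by
# TfidfTransformer). A sketch of the equivalent single-step route with TfidfVectorizer,
# scored the same way as the other variants via model_evaluator:
tfidf_vectorizer = feature_extraction.text.TfidfVectorizer()
train_tfidf_direct = tfidf_vectorizer.fit_transform(train.text)
test_tfidf_direct = tfidf_vectorizer.transform(test.text)
direct_score = model_evaluator(
    linear_model.RidgeClassifier(), train_tfidf_direct, target_vectors, test_tfidf_direct
)
print("cross validation score for TfidfVectorizer data:", direct_score.mean())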
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) pd.plotting.register_matplotlib_converters() import matplotlib.pyplot as plt # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import seaborn as sns file_path = "../input/prestigious-awards-in-india-from-1954-to-2013/Bharat Ratna and Padma Awards.csv" data = pd.read_csv(file_path, index_col="YEAR") # Replacing the values data.replace(to_replace="Awards Not Announced", value="0", inplace=True) data.info() # Converting the object datatype to integer datatype data["BHARAT RATNA"] = data["BHARAT RATNA"].astype(int) data["PADMA VIBHUSHAN"] = data["PADMA VIBHUSHAN"].astype(int) data["PADMA BHUSHAN"] = data["PADMA BHUSHAN"].astype(int) data["PADMA SHRI"] = data["PADMA SHRI"].astype(int) data["TOTAL"] = data["TOTAL"].astype(int) data.info() plt.figure(figsize=(18, 10)) plt.title("National Honours from 1954-2013") sns.lineplot(data=data) # counting total number of awardees for each award x1 = data["PADMA SHRI"].sum() x2 = data["PADMA BHUSHAN"].sum() x3 = data["PADMA VIBHUSHAN"].sum() x4 = data["BHARAT RATNA"].sum() count = { "PADMA SHRI": [x1], "PADMA BHUSHAN": [x2], "PADMA VIBHUSHAN": [x3], "BHARAT RATNA": [x4], } count = pd.DataFrame(count) # Set the width and height of the figure plt.figure(figsize=(20, 6)) # Add title plt.title("Comparison of each category") sns.barplot(data=count) # Add label for vertical axis plt.ylabel("Number of people awarded")
# In this notebook I will analyse the well-known Titanic case.
# An exploratory data analysis will be carried out, as the step that precedes the prediction stage,
# to determine which features (variables) were most relevant to surviving, or not, this tragedy.
# Importing the libraries needed for the data analysis
import pandas as pd  # dataset creation and manipulation
import numpy as np  # matrix manipulation
import matplotlib.pyplot as plt  # plotting and data visualisation

# Creating the dataset from the csv file
dados = pd.read_csv("../input/titanic/train_and_test2.csv")
# First look at the dataset for an initial analysis
dados.head()
# We can see that the table above has many columns containing only the value 0.
# In addition, some columns will not add value to the exploratory analysis and will be removed from the dataset.
# Removing the columns that will not be used in the analysis
# (the result has to be assigned back, otherwise the drop is not kept)
dados = dados.drop(
    columns=[
        "Passengerid",
        "zero",
        "zero.1",
        "zero.2",
        "zero.3",
        "zero.4",
        "zero.5",
        "zero.6",
        "zero.7",
        "zero.8",
        "zero.9",
        "zero.10",
        "zero.11",
        "zero.12",
        "zero.13",
        "zero.14",
        "zero.15",
        "zero.16",
        "zero.17",
        "zero.18",
    ]
)
import pandas as pd

train_and_test2 = pd.read_csv("../input/titanic/train_and_test2.csv")
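# The same clean-up can be written without listing every "zero" column by hand;
# a small sketch that selects them by name prefix (errors="ignore" keeps it safe to re-run):
zero_cols = [col for col in dados.columns if col.startswith("zero")]
dados = dados.drop(columns=zero_cols + ["Passengerid"], errors="ignore")
dados.head()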
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import sys sys.path.append("/kaggle/input/test-modules3/") # https://github.com/qubvel/classification_models/tree/master/classification_models/models import keras from models_factory import ModelsFactory class KerasModelsFactory(ModelsFactory): @staticmethod def get_kwargs(): return { "backend": keras.backend, "layers": keras.layers, "models": keras.models, "utils": keras.utils, } Classifiers = KerasModelsFactory() ResNet18, preprocess_input = Classifiers.get("resnet18") model = ResNet18((224, 224, 3)) test_batch = np.random.rand(32, 224, 224, 3) predict = model.predict(test_batch) predict
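# The factory call above also returned preprocess_input, which is never applied. Since the
# weights are ImageNet-trained, inputs would normally go through it before inference; the exact
# scaling it performs depends on the backbone, so treat this as a sketch rather than a spec.
preprocessed_batch = preprocess_input(test_batch)
predict_preprocessed = model.predict(preprocessed_batch)
predict_preprocessed.shape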
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pathlib, os, random, mplcyberpunk import splitfolders import matplotlib.pyplot as plt from keras.preprocessing.image import ImageDataGenerator import numpy as np import pandas as pd import tensorflow as tf from keras.layers import ( BatchNormalization, Dense, Dropout, Flatten, MaxPool2D, Conv2D, Activation, ) base_path = ( r"/kaggle/input/tuberculosis-tb-chest-xray-dataset/TB_Chest_Radiography_Database/" ) base_path = pathlib.Path(base_path) base_path splitfolders.ratio( base_path, output="X_ray_Imgs", seed=123, ratio=(0.7, 0.15, 0.15), group_prefix=None ) # necessary libraries import os import pandas as pd # visualizations libraries import matplotlib.pyplot as plt import matplotlib.image as mpimg from matplotlib.image import imread # tensorflow libraries import tensorflow as tf from tensorflow import keras from keras.preprocessing.image import ImageDataGenerator from keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.vgg19 import VGG19 from keras.models import Sequential, Model from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout from keras import optimizers from tensorflow.keras.utils import load_img from tensorflow.keras.utils import img_to_array # model evaluation libraries from sklearn.metrics import confusion_matrix, accuracy_score, classification_report from mlxtend.plotting import plot_confusion_matrix from glob import glob import os import numpy as np import pandas as pd import random from skimage.io import imread import matplotlib.pyplot as plt from keras.applications.resnet import ResNet50 from keras.applications.resnet import preprocess_input, decode_predictions from tensorflow.python.keras.models import Sequential from tensorflow.python.keras.layers import Dense, Flatten, GlobalAveragePooling2D from tensorflow.keras.preprocessing.image import ( ImageDataGenerator, img_to_array, load_img, ) from keras.applications.mobilenet_v2 import MobileNetV2 from keras.layers import Dense, Dropout, Flatten, BatchNormalization, GlobalMaxPooling2D batch_size = 20 img_height, img_width = 200, 200 input_shape = (img_height, img_width, 3) datagen = ImageDataGenerator(rescale=1 / 255) train_data = datagen.flow_from_directory( "X_ray_Imgs/train", target_size=(200, 200), batch_size=batch_size, class_mode="categorical", subset="training", ) test_data = datagen.flow_from_directory( "X_ray_Imgs/test", target_size=(200, 200), batch_size=batch_size, class_mode="categorical", shuffle=False, ) val_data = datagen.flow_from_directory( "X_ray_Imgs/val/", target_size=(200, 200), batch_size=batch_size, class_mode="categorical", shuffle=False, ) class_name = train_data.class_indices class_names = list(class_name.keys()) class_name num_classes = 2 model = Sequential() model.add( ResNet50( input_shape=(200, 200, 3), 
        include_top=False,
        pooling="avg",
        weights="imagenet",
    )
)
model.add(Dense(num_classes, activation="softmax"))
model.layers[0].trainable = False
model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(
    train_data,
    steps_per_epoch=train_data.samples // train_data.batch_size,
    validation_data=val_data,
    validation_steps=val_data.samples // val_data.batch_size,
    epochs=10,
    verbose=1,
)
history.history.keys()
epochs = range(1, len(history.history["accuracy"]) + 1)
plt.plot(epochs, history.history["accuracy"], color="purple")
plt.plot(epochs, history.history["val_accuracy"], color="pink")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.title("Accuracy plot")
plt.legend(["train_acc", "val_acc"])
plt.show()
plt.plot(epochs, history.history["loss"], color="purple")
plt.plot(epochs, history.history["val_loss"], color="pink")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.title("Loss plot")
plt.legend(["train_loss", "val_loss"])
plt.show()
import numpy as np

prediction = model.predict(
    val_data, steps=np.ceil(val_data.samples / val_data.batch_size), verbose=1
)
prediction = prediction > 0.5
prediction
val_labels = val_data.classes
val_labels
pred1 = [i[0] for i in prediction]
pred2 = [i[1] for i in prediction]
cm = confusion_matrix(val_data.classes, pred2)
plot_confusion_matrix(cm, figsize=(5, 5))
print(accuracy_score(val_data.classes, pred2))
print(classification_report(val_data.classes, pred2))
# Importing the metrics package from sklearn library
from sklearn import metrics

# Creating the confusion matrix
cm = metrics.confusion_matrix(val_data.classes, pred2)
# Assigning column names
cm_df = pd.DataFrame(
    cm,
    columns=["Predicted Negative", "Predicted Positive"],
    index=["Actual Negative", "Actual Positive"],
)
# Showing the confusion matrix
cm_df


# Creating a function to report confusion metrics
def confusion_metrics(conf_matrix):
    # save confusion matrix and slice into four pieces
    TP = conf_matrix[1][1]
    TN = conf_matrix[0][0]
    FP = conf_matrix[0][1]
    FN = conf_matrix[1][0]
    print("True Positives:", TP)
    print("True Negatives:", TN)
    print("False Positives:", FP)
    print("False Negatives:", FN)
    # calculate accuracy
    conf_accuracy = float(TP + TN) / float(TP + TN + FP + FN)
    # calculate mis-classification
    conf_misclassification = 1 - conf_accuracy
    # calculate the sensitivity (recall)
    conf_sensitivity = TP / float(TP + FN)
    # calculate the specificity
    conf_specificity = TN / float(TN + FP)
    # calculate precision: TP / (TP + FP)
    conf_precision = TP / float(TP + FP)
    # calculate f_1 score
    conf_f1 = 2 * (
        (conf_precision * conf_sensitivity) / (conf_precision + conf_sensitivity)
    )
    print("-" * 50)
    print(f"Accuracy: {round(conf_accuracy,2)}")
    print(f"Mis-Classification: {round(conf_misclassification,2)}")
    print(f"Sensitivity: {round(conf_sensitivity,2)}")
    print(f"Specificity: {round(conf_specificity,2)}")
    print(f"Precision: {round(conf_precision,2)}")
    print(f"f_1 Score: {round(conf_f1,2)}")


confusion_metrics(cm)
from sklearn.metrics import roc_curve, auc

fpr, tpr, threshold = roc_curve(val_data.classes, pred2)
auc_resnet = auc(fpr, tpr)
plt.figure(figsize=(5, 5), dpi=100)
plt.plot(fpr, tpr, linestyle="-", label="Resnet50 (auc = %0.3f)" % auc_resnet)
plt.xlabel("False Positive Rate -->")
plt.ylabel("True Positive Rate -->")
plt.legend()
plt.title("ROC curve")
plt.show()
import gradio as gr

class_names = ["Normal", "Tuberculosis"]


def predict_image(img1):
    img1 = img1.reshape(200, 200, -1)
    img1 = tf.keras.utils.img_to_array(img1)
    img1 = np.expand_dims(img1, axis=0)
    img1 = img1 / 255
    prediction = 
model.predict(img1).flatten() print(prediction) for i in range(2): print(class_names[i], float(prediction[i])) return {class_names[i]: float(prediction[i]) for i in range(2)} image = gr.inputs.Image(shape=(200, 200)) label = gr.outputs.Label(num_top_classes=2) gr.Interface( fn=predict_image, inputs=image, outputs=label, interpretation="default" ).launch(debug="True") # predicting an image from keras.preprocessing import image import numpy as np image_path = "/kaggle/input/tuberculosis-tb-chest-xray-dataset/TB_Chest_Radiography_Database/Tuberculosis/Tuberculosis-2.png" new_img = tf.keras.utils.load_img(image_path, target_size=(200, 200)) img = tf.keras.utils.img_to_array(new_img) img = np.expand_dims(img, axis=0) img = img / 255 print("Following is our prediction:") prediction = model.predict(img) # decode the results into a list of tuples (class, description, probability) # (one such list for each sample in the batch) d = prediction.flatten() j = d.max() # print(d) # print(j) for index, item in enumerate(d): if item == j: print(item) print(j) class_name = class_names[index] print(class_names[index]) # Another way # img_class = model.predict_classes(img) # img_prob = model.predict_proba(img) # print(img_class ,img_prob ) # ploting image with predicted class name plt.figure(figsize=(4, 4)) plt.imshow(new_img) plt.axis("off") plt.title(class_name) plt.show()
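# A compact cross-check of the hand-computed numbers above with scikit-learn's built-in
# scorers, using the same val_data.classes / pred2 pair as the confusion matrix section:
from sklearn.metrics import precision_score, recall_score, f1_score

print("Precision:", precision_score(val_data.classes, pred2))
print("Recall (sensitivity):", recall_score(val_data.classes, pred2))
print("F1 score:", f1_score(val_data.classes, pred2))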
import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt sns.set_style("whitegrid") df_2017 = pd.read_csv("../input/utmb_results_2017.csv") df_2018 = pd.read_csv("../input/utmb_results_2018.csv") df_2019 = pd.read_csv("../input/utmb_results_2019.csv") df_2017.head() df_2018.head() df_2019.head() df_2017["Year"] = 2017 df_2018["Year"] = 2018 df_2019["Year"] = 2019 df = pd.concat([df_2017, df_2018, df_2019]) # combine the three dataframes df = df.drop(["Unnamed: 0"], axis=1) df.columns df.info() df.head() def convert_to_minutes(row): return sum(i * j for i, j in zip(map(float, row.split(":")), [60, 1, 1 / 60])) df["Minutes"] = df["Time"].apply(convert_to_minutes) df["Minutes"] = df["Minutes"].round(2) df.head() plt.figure(num=None, figsize=(8, 6), dpi=80) plt.hist(df["Minutes"], alpha=0.5) plt.title("2017 & 2018 & 2019 UTMB Times", fontsize=18, fontweight="bold") plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.xlabel("Time (min)", fontsize=18) plt.ylabel("Frequency", fontsize=18) plt.show() df.describe() plt.figure(figsize=(8, 6), dpi=80) sns.boxplot(x="Year", y="Minutes", data=df) plt.title("UTMB Results by Year", fontsize=18, fontweight="bold") plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.xlabel("Year", fontsize=18) plt.ylabel("Minutes", fontsize=18) plt.figure(figsize=(8, 6), dpi=80) sns.violinplot(x="Year", y="Minutes", data=df, inner="quartile") plt.title("UTMB Results by Year", fontsize=18, fontweight="bold") plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.xlabel("Year", fontsize=18) plt.ylabel("Minutes", fontsize=18) plt.savefig("UTMB ViolinPlot.png") # Add Gender df["Cat."].value_counts() df[df["Cat."].str.contains("H", regex=False) == False] df.loc[df["Cat."].str.contains("H", regex=False) == False, "Gender"] = "Female" df.loc[df["Cat."].str.contains("H", regex=False) == True, "Gender"] = "Male" df plt.figure(figsize=(12, 10), dpi=80) sns.swarmplot(x="Year", y="Minutes", hue="Gender", data=df) plt.title("UTMB Results by Year", fontsize=18, fontweight="bold") plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.xlabel("Year", fontsize=18) plt.ylabel("Minutes", fontsize=18) df.loc[df["Cat."].str.contains("E", regex=False) == True, "Age group"] = "22-39" df.loc[df["Cat."].str.contains("1", regex=False) == True, "Age group"] = "40-49" df.loc[df["Cat."].str.contains("2", regex=False) == True, "Age group"] = "50-59" df.loc[df["Cat."].str.contains("3", regex=False) == True, "Age group"] = "60-69" df.loc[df["Cat."].str.contains("4", regex=False) == True, "Age group"] = "70" # subset only men's results men = df.loc[df["Gender"] == "Male"] # plot violin and swarm plots by age plt.figure(figsize=(8, 6), dpi=80) sns.violinplot( x="Age group", y="Minutes", data=men, color="lightblue", inner="quartile" ) plt.title("Mens UTMB Results by Age", fontsize=18, fontweight="bold") plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.xlabel("Age Groups", fontsize=18) plt.ylabel("Minutes", fontsize=18) plt.savefig("UTMB Mens SwarmPlot.png") # subset only women's results women = df.loc[df["Gender"] == "Female"] # plot violin and swarm plots by age categories plt.figure(figsize=(8, 6), dpi=80) sns.violinplot( x="Age group", y="Minutes", data=women, color="lightblue", inner="quartile" ) sns.swarmplot(x="Age group", y="Minutes", data=women, color="darkblue") plt.title("Womens UTMB Results by Age", fontsize=18, fontweight="bold") plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.xlabel("Age Groups", fontsize=18) plt.ylabel("Minutes", fontsize=18) 
plt.savefig("UTMB Womens SwarmPlot.png") df[df["Gender"] == "Female"]["Age group"].value_counts() group_times = women["Minutes"].where(women["Age group"] == "22-39").dropna() group_times # subset my age group results group_times = women["Minutes"].where(women["Age group"] == "22-39").dropna() # 25, 50 and 75 percentiles for total time np.round(np.percentile(group_times, [25, 50, 75]) / 60, 1) # 25, 50 and 75 percentiles for calculated per km pace np.round(np.percentile(group_times, [25, 50, 75]) / 171, 1) import plotly.graph_objects as go fig = go.Figure( data=[ go.Table( header=dict( values=["Finish", "Total time, hours", "Pace required, min/km"] ), cells=dict( values=[ ["In the top 25%", "In the top 50%", "In the top 75%"], np.round(np.percentile(group_times, [25, 50, 75]) / 60, 1), np.round(np.percentile(group_times, [25, 50, 75]) / 171, 1), ] ), ) ] ) fig.show() df["Nationality"].value_counts() df["Nationality"].value_counts().to_dict().items() items = df["Nationality"].value_counts().to_dict().items() # Filtering only those rows where duplicate entries occur more than n n = 80 nations = df[df["Nationality"].isin([key for key, val in items if val > n])][ "Nationality" ].value_counts() nations nations["rest"] = ( df[df["Nationality"].isin([key for key, val in items if val < n])]["Nationality"] .value_counts() .sum() ) nations nations.tolist() labels = nations.index.tolist() counts = nations.tolist() fig1, ax1 = plt.subplots(figsize=(13, 13)) ax1.pie(counts, labels=labels, autopct="%1.1f%%", startangle=30) ax1.axis("equal") plt.show()
import pandas as pd import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder from sklearn.model_selection import ( train_test_split, KFold, cross_val_score, StratifiedKFold, ) from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression, PassiveAggressiveClassifier from sklearn.ensemble import ( GradientBoostingClassifier, RandomForestClassifier, ExtraTreesRegressor, ) from sklearn.decomposition import PCA from sklearn.feature_selection import RFE from sklearn.metrics import accuracy_score from sklearn.mixture import GaussianMixture from category_encoders import LeaveOneOutEncoder, BinaryEncoder, TargetEncoder import time import logging sample_submission = pd.read_csv("../input/cat-in-the-dat-ii/sample_submission.csv") test = pd.read_csv("../input/cat-in-the-dat-ii/test.csv") train = pd.read_csv("../input/cat-in-the-dat-ii/train.csv") def replace_nans(dataframe): for each in dataframe.columns: if each == "id": continue if dataframe[each].dtype != "object" or dataframe[each].dtype != "datetime64": dataframe.loc[:, each] = dataframe.fillna(dataframe[each].mode()[0]) else: dataframe.loc[:, each] = dataframe.fillna("UNKNOWN") return dataframe def encoder(dataframe, columns, enc_type="bin"): if enc_type == "bin": for col in columns: unique = dataframe[col].unique() dataframe.loc[:, col] = dataframe[col].apply( lambda x: 1 if x == unique[0] else (0 if x == unique[1] else None) ) if enc_type == "ord": encoder = OrdinalEncoder(dtype=np.int16) for col in columns: dataframe.loc[:, col] = encoder.fit_transform( np.array(dataframe[col]).reshape(-1, 1) ) return dataframe def rank_features(estimator, X_train, y_train): selector = RFE(estimator, 10, step=1) selector = selector.fit(X_train, y_train) return selector.ranking_ def fitter(clf, X_train, X_test, y_train, y_test): print("training ", clf) y_train = np.array([[target] for target in y_train]) y_test = np.array([[target] for target in y_test]) clf.fit(X_train, y_train) # predictions = clf.predict(X_test) # print('accuracy:', accuracy_score(y_test, predictions)) try: print("score:", clf.score(clf, X_test, y_test)) except Exception: print(clf.best_score_) return clf def main_2(): data = train data = replace_nans(data) submission_data = replace_nans(test) print(data.columns) nom_cols = ["nom_0", "nom_1", "nom_2"] ord_cols = ["ord_3", "ord_4", "ord_5"] bin_cols = ["bin_3", "bin_4"] ord_encoder = OrdinalEncoder() for enc in ord_cols + nom_cols: data[enc] = ord_encoder.fit_transform(np.array(data[enc]).reshape(-1, 1)) submission_data[enc] = ord_encoder.fit_transform( np.array(submission_data[enc]).reshape(-1, 1) ) for enc in ["nom_3", "nom_4"]: enc1 = pd.get_dummies(data[enc], prefix=enc) data.drop(columns=enc, inplace=True) data = pd.concat([data, enc1], axis=1) for enc in ["nom_3", "nom_4"]: enc1 = pd.get_dummies(submission_data[enc], prefix=enc) submission_data.drop(columns=enc, inplace=True) submission_data = pd.concat([submission_data, enc1], axis=1) target = data["target"] data = data.drop("target", axis=1) # for enc in ['nom_5', 'nom_6', 'nom_7', 'nom_8', 'nom_9']: loo_enc = LeaveOneOutEncoder( cols=["nom_5", "nom_6", "nom_7", "nom_8", "nom_9"], return_df=True ) loo_enc.fit(data, target) data = loo_enc.transform(data) # print(data) # submission_data[enc] = target_enc.transform(submission_data[enc].values.reshape(-1, 1)) submission_data = loo_enc.transform(submission_data) data = encoder(data, ["ord_1", "ord_2"], enc_type="ord") data = encoder(data, 
bin_cols, enc_type="bin") submission_data = encoder(submission_data, ["ord_1", "ord_2"], enc_type="ord") submission_data = encoder(submission_data, bin_cols, enc_type="bin") time_features = ["day", "month"] for feature in time_features: data[feature + "_sin"] = np.sin( (2 * np.pi * data[feature]) / max(data[feature]) ) data[feature + "_cos"] = np.cos( (2 * np.pi * data[feature]) / max(data[feature]) ) data.drop(time_features, axis=1, inplace=True) for feature in time_features: submission_data[feature + "_sin"] = np.sin( (2 * np.pi * submission_data[feature]) / max(submission_data[feature]) ) submission_data[feature + "_cos"] = np.cos( (2 * np.pi * submission_data[feature]) / max(submission_data[feature]) ) submission_data.drop(time_features, axis=1, inplace=True) # X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.25) clf_2 = LogisticRegression(solver="saga", verbose=1) clf_3 = GradientBoostingClassifier( n_estimators=100, verbose=1, learning_rate=0.05, max_depth=7 ) clf_4 = PassiveAggressiveClassifier(verbose=1) clf_5 = RandomForestClassifier( n_estimators=500, verbose=2 ) # , criterion='entropy') clf_6 = ExtraTreesRegressor(n_estimators=500, bootstrap=False, n_jobs=2, verbose=1) # clf_6 = GradientBoostingRegressor(n_estimators=500, learning_rate=0.1, verbose=1) kf = KFold(n_splits=5) for train_index, test_index in kf.split(data): X_train, X_test = data.values[train_index], data.values[test_index] y_train, y_test = target.values[train_index], target.values[test_index] clf_3.fit(X_train, y_train) print(clf_3.score(X_test, y_test)) predictions = clf_3.predict_proba(submission_data.values) predictions = [x[1] for x in predictions] # print(predictions) submission_data = pd.read_csv("../input/cat-in-the-dat-ii/test.csv") submission_data["target"] = predictions submission_data = pd.concat( [submission_data["id"], submission_data["target"]], axis=1 ) submission_data.to_csv("submission.csv", index=False) main_2()
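# One caveat with main_2() above: the OrdinalEncoder is fit separately on train and test, so the
# same category can receive different integer codes in the two frames. A sketch of the usual fix,
# fitting on the training column only and reusing that encoder for test; it assumes a scikit-learn
# version (>= 0.24) that supports handle_unknown="use_encoded_value".
from sklearn.preprocessing import OrdinalEncoder


def encode_consistently(train_df, test_df, cols):
    # One encoder per column, fit on train only; unseen test categories become -1.
    for col in cols:
        enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
        train_df[col] = enc.fit_transform(train_df[[col]])
        test_df[col] = enc.transform(test_df[[col]])
    return train_df, test_df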
# Importing the necessary libraries import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from xgboost import XGBClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.metrics import accuracy_score, confusion_matrix import matplotlib.pyplot as plt from keras.models import Sequential from keras.layers import Dense # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # Let's look at the data. train = pd.DataFrame(pd.read_csv("/kaggle/input/mushroom-classification/mushrooms.csv")) train.columns # Renaming the columns train.columns = [ "class", "cap_shape", "cap_surface", "cap_color", "bruises", "odor", "gill_attachment", "gill_spacing", "gill_size", "gill_color", "stalk_shape", "stalk_root", "stalk_surface_above_ring", "stalk_surface_below_ring", "stalk_color_above_ring", "stalk_color_below_ring", "veil_type", "veil_color", "ring_number", "ring_type", "spore_print_color", "population", "habitat", ] # Checking for missing values train.head(5) train.isna().sum() # train['stalk-root'].unique() train.dtypes # Let's map Categorical variables mapping = [ {"e": 1, "p": 0}, {"b": 0, "c": 1, "x": 2, "f": 3, "k": 4, "s": 5}, {"f": 0, "g": 1, "y": 2, "s": 3}, {"n": 0, "b": 1, "c": 2, "g": 3, "r": 4, "p": 5, "u": 6, "e": 7, "w": 8, "y": 9}, {"t": 1, "f": 0}, {"a": 1, "l": 2, "c": 3, "y": 4, "f": 5, "m": 6, "n": 0, "p": 7, "s": 8}, {"a": 0, "d": 1, "f": 2, "n": 3}, {"c": 0, "w": 1, "d": 2}, {"b": 0, "n": 1}, { "k": 0, "n": 1, "b": 2, "h": 3, "g": 4, "r": 5, "o": 6, "p": 7, "u": 8, "e": 9, "w": 10, "y": 11, }, {"e": 0, "t": 1}, {"b": 0, "c": 1, "u": 2, "e": 3, "z": 4, "r": 5, "?": 6}, {"f": 0, "y": 1, "k": 2, "s": 3}, {"f": 0, "y": 1, "k": 2, "s": 3}, {"n": 0, "b": 1, "c": 2, "g": 3, "o": 4, "p": 5, "e": 5, "w": 6, "y": 7}, {"n": 0, "b": 1, "c": 2, "g": 3, "o": 4, "p": 5, "e": 6, "w": 7, "y": 8}, {"p": 0, "u": 1}, {"n": 0, "o": 1, "w": 2, "y": 3}, {"n": 0, "o": 1, "t": 2}, {"c": 4, "e": 1, "f": 2, "l": 3, "n": 0, "p": 5, "s": 6, "z": 7}, {"k": 0, "n": 1, "b": 2, "h": 3, "r": 4, "o": 5, "u": 6, "w": 7, "y": 8}, {"a": 0, "c": 1, "n": 2, "s": 3, "v": 4, "y": 5}, {"g": 0, "l": 1, "m": 2, "p": 3, "u": 4, "w": 5, "d": 6}, ] len(mapping), len(train.columns) for i in range(len(train.columns)): train[train.columns[i]] = train[train.columns[i]].map(mapping[i]).astype(int) # Data types have changed from object to int train.shape # Separating depend varible from predictors and splitting the dataset x = train.iloc[:, 1:] y = train.iloc[:, 0] x_tr, x_ts, y_tr, y_ts = train_test_split(x, y, test_size=0.2) # Predictions using Logistic regression lr = LogisticRegression() lr.fit(x_tr, y_tr) accuracy_score(y_ts, lr.predict(x_ts)), confusion_matrix(y_ts, lr.predict(x_ts)) # Predictions using KNN kn = KNeighborsClassifier(n_neighbors=5, metric="minkowski", p=2) kn.fit(x_tr, y_tr) accuracy_score(y_ts, kn.predict(x_ts)), confusion_matrix(y_ts, kn.predict(x_ts)) # 
Linear SVM svm = SVC(kernel="linear", random_state=0) svm.fit(x_tr, y_tr) ysvc_pred = svm.predict(x_ts) accuracy_score(y_ts, ysvc_pred), confusion_matrix(y_ts, ysvc_pred) # Kernel SVM kersvm = SVC(kernel="rbf", random_state=0) kersvm.fit(x_tr, y_tr) yksvm_pred = kersvm.predict(x_ts) accuracy_score(y_ts, yksvm_pred), confusion_matrix(y_ts, yksvm_pred) # Naive Bayes gnb = GaussianNB() gnb.fit(x_tr, y_tr) ygnb_pred = gnb.predict(x_ts) accuracy_score(y_ts, ygnb_pred), confusion_matrix(y_ts, ygnb_pred) # Decision Trees dct = DecisionTreeClassifier(random_state=0) dct.fit(x_tr, y_tr) ydct_pred = dct.predict(x_ts) accuracy_score(y_ts, ydct_pred), confusion_matrix(y_ts, ydct_pred) # Random Forest rf = RandomForestClassifier(random_state=0, n_estimators=100) rf.fit(x_tr, y_tr) yrf_pred = rf.predict(x_ts) accuracy_score(y_ts, yrf_pred), confusion_matrix(y_ts, yrf_pred) # XGBoost xgb = XGBClassifier() xgb.fit(x_tr, y_tr) y_xgb = xgb.predict(x_ts) accuracy_score(y_ts, y_xgb), confusion_matrix(y_ts, y_xgb) x_tr.shape # For the task submission , Using ANN classifier = Sequential() classifier.add(Dense(64, activation="relu", input_dim=22)) # classifier.add(Dense(output_dim=1,init='uniform',activation='relu')) classifier.add(Dense(1, activation="sigmoid")) classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]) classifier.fit(x_tr, y_tr, batch_size=10, epochs=100) y_pred = classifier.predict(x_ts) y_pred = y_pred > 0.5 confusion_matrix(y_ts, y_pred)
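# The hand-written mapping dictionaries above have to stay in sync with the column order
# (len(mapping) must equal len(train.columns)). A sketch of the same kind of integer encoding
# done column by column with pandas.factorize; the codes follow order of appearance, so they
# differ from the manual mapping but work just as well for tree-based models.
raw = pd.read_csv("/kaggle/input/mushroom-classification/mushrooms.csv")
encoded = raw.apply(lambda col: pd.factorize(col)[0])
encoded.head()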
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # needed for the heatmap figure below
import seaborn as sns  # needed for sns.heatmap below

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
train = pd.read_csv(
    "/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
train.head()
print(f"Train has {train.shape[0]} rows and {train.shape[1]} columns")
print(f"Test has {test.shape[0]} rows and {test.shape[1]} columns")
train.info()
test.info()
train = train.fillna(0)
test = test.fillna(0)
print(train.isna().sum())
print(test.isna().sum())


def objtypelist(df):
    # Collect the names of the non-numeric (object) columns
    objecttype = []
    for col in df.columns:
        if df[col].dtype == np.float64 or df[col].dtype == np.int64:
            pass
        else:
            objecttype.append(col)
    return objecttype


train_obj = objtypelist(train)
test_obj = objtypelist(test)
from sklearn.preprocessing import LabelEncoder

lb_make = LabelEncoder()  # instantiated for the object columns, but not used below
train.corr()["SalePrice"].sort_values()
plt.figure(figsize=(20, 10))
sns.heatmap(train.corr(), annot=True, cmap="Reds")
plt.show()
X = train[["OverallQual"]]
y = train["SalePrice"]
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=0.7, random_state=100
)
from sklearn.linear_model import LinearRegression

mod = LinearRegression()
mod.fit(X_train, y_train)
mod.intercept_, mod.coef_
from sklearn.metrics import r2_score, mean_squared_error

y_train_pred = mod.predict(X_train)
r2_score(y_train, y_train_pred)
# Predict with the fitted model instead of hard-coding its intercept and coefficient
test["SalePrice"] = mod.predict(test[["OverallQual"]])
test["SalePrice"]
my_submission = pd.DataFrame({"Id": test.Id, "SalePrice": test["SalePrice"]})
my_submission.to_csv("submission.csv", index=False)
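# A quick sanity check of the single-feature model on the 30% hold-out split created above,
# reusing the metrics that are already imported (R^2 plus RMSE in dollars):
y_test_pred = mod.predict(X_test)
print("Hold-out R^2 :", r2_score(y_test, y_test_pred))
print("Hold-out RMSE:", np.sqrt(mean_squared_error(y_test, y_test_pred)))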
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # data visualization library

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# context
# I am interested in looking at airbnb price trends by date and availability.
# Are the booked reservations on average cheaper than the available ones?
# What do the price clusters look like over time? Do the clusters change over time?
# Is there a time of year that is cheaper to book?
# I want to understand when is the cheapest time to book.
df = pd.read_csv("/kaggle/input/airbnb-nov-2019-cal/calendar.csv")  # read csv file
df.head()  # check the first five rows to see how the data looks
df_dp = df[
    ["date", "price"]
]  # clean up data to isolate the two variables we are interested in
df_dp.head()  # check the first five rows of the reduced frame
# kaggle got annoyed with me and made me include the following lines of code
from pandas.plotting import register_matplotlib_converters

register_matplotlib_converters()
# First, convert the columns that have numbers into numeric values so they can be easily plotted
# change the dates from string to datetime data types
x_numbers = pd.to_datetime(df_dp["date"])
# remove the $ from the price and convert price from string to float datatype
y_numbers = df_dp["price"].replace("[\$,]", "", regex=True).astype(float)
# create a scatter plot with date and price
plt.scatter(x_numbers, y_numbers, s=5)
# show scatter plot
plt.show()
# it is interesting to see the different pricing tiers and their consistency over time
# I was curious how a line graph would look.
# Turns out it tells us nothing because there are way too many price points at each moment in time
# Pretty hilarious
plt.plot(x_numbers, y_numbers)
# A histogram is probably more useful although I have no idea how to select the number of bins
# I am just going to play with it and increase bin size until I can see a good amount of detail
# limit the x axis to zoom in on histogram
plt.xlim([0, 1200])
# label the axes for clarity
plt.ylabel("number of listings")
plt.xlabel("price ($)")
# give plot a title
plt.title("Seattle Airbnb Prices 2019 - 2021")
# plot the histogram to see the price distribution across 2000 airbnbs
plt.hist(y_numbers, bins=3000, alpha=0.5)
# I wonder if more expensive places in Seattle are less likely to be booked?
# first I will create a new data frame that only includes booking status and price info
df_booked = df[["available", "price"]]
df_booked
# But this df_booked data frame needs to be split:
# I want to separate the free rooms (indicated as f in the available column)
# from the taken rooms (indicated as t in the available column)
# I will store the newly split data in their own data frames to easily get each pricing average
# filter rows for 't', which indicates the room is taken, using a boolean expression
df_t = df_booked[df_booked["available"] == "t"]
t_price_numbers = df_t["price"].replace("[\$,]", "", regex=True).astype(float)
df_t.tail()
# filter rows for 'f', which indicates the room is free, using a boolean expression
df_f = df_booked[df_booked["available"] == "f"]
f_price_numbers = df_f["price"].replace("[\$,]", "", regex=True).astype(float)
df_f.tail()
# now that the numbers have been converted to floats
# I want to find the means of taken and free airbnbs and graph them
# define names and values for bar graph
names = ["booked", "available"]
# label the y axis for clarity
plt.ylabel("average price ($)")
# values are the means of both booked and available prices
values = [t_price_numbers.mean(), f_price_numbers.mean()]
# plot the graph and show it
plt.bar(names, values)
plt.show()
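# One of the opening questions was whether there is a cheaper time of year to book.
# A sketch that reuses x_numbers (dates) and y_numbers (prices) from above and looks at the
# average listed price per calendar month:
price_over_time = pd.DataFrame({"date": x_numbers, "price": y_numbers}).set_index("date")
monthly_avg = price_over_time.resample("M")["price"].mean()
plt.figure(figsize=(10, 5))
plt.plot(monthly_avg.index, monthly_avg.values)
plt.ylabel("average listed price ($)")
plt.xlabel("month")
plt.title("Average Seattle Airbnb listed price by month")
plt.show()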
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score, cross_val_predict from sklearn.svm import SVC from sklearn.metrics import classification_report, confusion_matrix from sklearn.metrics import accuracy_score df = pd.read_csv("/kaggle/input/ckdisease/kidney_disease.csv") df.head() # Map text to 1/0 and do some cleaning df[["htn", "dm", "cad", "pe", "ane"]] = df[["htn", "dm", "cad", "pe", "ane"]].replace( to_replace={"yes": 1, "no": 0} ) df[["rbc", "pc"]] = df[["rbc", "pc"]].replace(to_replace={"abnormal": 1, "normal": 0}) df[["pcc", "ba"]] = df[["pcc", "ba"]].replace( to_replace={"present": 1, "notpresent": 0} ) df[["appet"]] = df[["appet"]].replace(to_replace={"good": 1, "poor": 0, "no": np.nan}) df["classification"] = df["classification"].replace( to_replace={"ckd": 1.0, "ckd\t": 1.0, "notckd": 0.0, "no": 0.0} ) df.rename(columns={"classification": "class"}, inplace=True) # Further cleaning df["pe"] = df["pe"].replace( to_replace="good", value=0 ) # Not having pedal edema is good df["appet"] = df["appet"].replace(to_replace="no", value=0) df["cad"] = df["cad"].replace(to_replace="\tno", value=0) df["dm"] = df["dm"].replace(to_replace={"\tno": 0, "\tyes": 1, " yes": 1, "": np.nan}) df.drop("id", axis=1, inplace=True) df.head() df2 = df.dropna(axis=0) df2["class"].value_counts() df2.apply(pd.to_numeric) df2.dtypes for i in range(0, df2.shape[1]): if df2.dtypes[i] == "object": print(df2.columns[i], "<--- having object datatype") df2["pcv"] = df2.pcv.astype(float) df2["wc"] = df2.wc.astype(float) df2["rc"] = df2.rc.astype(float) df2["dm"] = df2.dm.astype(float) df2.dtypes df2["class"] = df2["class"].astype(int) X = df2.drop("class", axis=1) X = StandardScaler().fit_transform(X) y = df2["class"] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=0, stratify=df2["class"] ) model = SVC() parameters = [ {"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]}, {"kernel": ["linear"], "C": [1, 10, 100, 1000]}, ] grid = GridSearchCV(estimator=model, param_grid=parameters, cv=5) grid.fit(X_train, y_train) roc_auc = np.around( np.mean(cross_val_score(grid, X_test, y_test, cv=5, scoring="roc_auc")), decimals=4 ) print("Score: {}".format(roc_auc)) model1 = RandomForestClassifier(n_estimators=1000) tuned_parameters = [ { "n_estimators": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16], "max_depth": [2, 3, 4, 5, 6, None], "class_weight": [None, {0: 0.33, 1: 0.67}, "balanced"], "random_state": [42], } ] clf = GridSearchCV(model1, tuned_parameters, cv=10, scoring="roc_auc") clf.fit(X_train, y_train) score1 = np.mean(cross_val_score(model1, X_test, y_test, cv=5, scoring="roc_auc")) np.around(score1, decimals=4) df2 = df.dropna(axis=0) 
no_na = df2.index.tolist() some_na = df.drop(no_na).apply(lambda x: pd.to_numeric(x, errors="coerce")) some_na = some_na.fillna(0) # Fill up all Nan by zero. clf_best = clf.best_estimator_ X_test = some_na.iloc[:, :-1] y_test = some_na["class"] y_true = y_test lr_pred = clf_best.predict(X_test) print(classification_report(y_true, lr_pred)) confusion = confusion_matrix(y_test, lr_pred) print("Confusion Matrix:") print(confusion) score2 = accuracy_score(y_true, lr_pred) print("Score: %3f" % score2) model2 = KNeighborsClassifier() model2.fit(X_train, y_train) score3 = np.around( np.mean(cross_val_score(model2, X_test, y_test, cv=5, scoring="roc_auc")), decimals=4, ) print("Score : {}".format(score3)) model3 = LogisticRegression() parameters = {"C": [0.001, 0.01, 0.1, 1, 10, 100]} grid = GridSearchCV(estimator=model3, param_grid=parameters, cv=5) grid.fit(X_train, y_train) score4 = np.around( np.mean(cross_val_score(model3, X_test, y_test, cv=5, scoring="roc_auc")), decimals=4, ) print("Score : {}".format(score4)) names = [] scores = [] names.extend(["RF", "KNN", "LR"]) scores.extend([score2, score3, score4]) alg = pd.DataFrame({"Score": scores}, index=names) print("Most Accurate : \n{}".format(alg.loc[alg["Score"].idxmax()]))
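# A minimal follow-up sketch, assuming X_train and y_train from the earlier
# train_test_split are still in scope: the scores above mix metrics (accuracy for
# the random forest path, ROC AUC elsewhere), so as a sanity check this scores all
# four classifier families with the same cross-validated ROC AUC on the training split.
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression

candidates = {
    "SVC": SVC(),
    "RF": RandomForestClassifier(n_estimators=100, random_state=42),
    "KNN": KNeighborsClassifier(),
    "LR": LogisticRegression(max_iter=1000),
}
for name, clf_candidate in candidates.items():
    # same scoring and cv for every model so the comparison is apples-to-apples
    auc = cross_val_score(clf_candidate, X_train, y_train, cv=5, scoring="roc_auc").mean()
    print("{}: mean ROC AUC = {:.4f}".format(name, auc))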
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # LOAD LIBRARIES import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import LearningRateScheduler import pandas as pd sample_submission = pd.read_csv("../input/digit-recognizer/sample_submission.csv") test = pd.read_csv("../input/digit-recognizer/test.csv") train = pd.read_csv("../input/digit-recognizer/train.csv") # PREPARE DATA FOR NEURAL NETWORK Y_train = train["label"] X_train = train.drop(labels=["label"], axis=1) X_train = X_train / 255.0 X_test = test / 255.0 X_train = X_train.values.reshape(-1, 28, 28, 1) X_test = X_test.values.reshape(-1, 28, 28, 1) Y_train = to_categorical(Y_train, num_classes=10) import matplotlib.pyplot as plt # PREVIEW IMAGES plt.figure(figsize=(15, 4.5)) for i in range(30): plt.subplot(3, 10, i + 1) plt.imshow(X_train[i].reshape((28, 28)), cmap=plt.cm.binary) plt.axis("off") plt.subplots_adjust(wspace=-0.1, hspace=-0.1) plt.show() # CREATE MORE IMAGES VIA DATA AUGMENTATION datagen = ImageDataGenerator( rotation_range=10, zoom_range=0.10, width_shift_range=0.1, height_shift_range=0.1 ) # PREVIEW AUGMENTED IMAGES X_train3 = X_train[9,].reshape((1, 28, 28, 1)) Y_train3 = Y_train[9,].reshape((1, 10)) plt.figure(figsize=(15, 4.5)) for i in range(30): plt.subplot(3, 10, i + 1) X_train2, Y_train2 = datagen.flow(X_train3, Y_train3).next() plt.imshow(X_train2[0].reshape((28, 28)), cmap=plt.cm.binary) plt.axis("off") if i == 9: X_train3 = X_train[11,].reshape((1, 28, 28, 1)) if i == 19: X_train3 = X_train[18,].reshape((1, 28, 28, 1)) plt.subplots_adjust(wspace=-0.1, hspace=-0.1) plt.show() # BUILD CONVOLUTIONAL NEURAL NETWORKS nets = 15 model = [0] * nets for j in range(nets): model[j] = Sequential() model[j].add(Conv2D(32, kernel_size=3, activation="relu", input_shape=(28, 28, 1))) model[j].add(BatchNormalization()) model[j].add(Conv2D(32, kernel_size=3, activation="relu")) model[j].add(BatchNormalization()) model[j].add( Conv2D(32, kernel_size=5, strides=2, padding="same", activation="relu") ) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64, kernel_size=3, activation="relu")) model[j].add(BatchNormalization()) model[j].add(Conv2D(64, kernel_size=3, activation="relu")) model[j].add(BatchNormalization()) model[j].add( Conv2D(64, kernel_size=5, strides=2, padding="same", activation="relu") ) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(128, kernel_size=4, activation="relu")) model[j].add(BatchNormalization()) model[j].add(Flatten()) model[j].add(Dropout(0.4)) model[j].add(Dense(10, activation="softmax")) # COMPILE WITH ADAM OPTIMIZER AND CROSS ENTROPY COST model[j].compile( optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] ) # DECREASE LEARNING RATE EACH EPOCH annealer = LearningRateScheduler(lambda x: 1e-3 * 
0.95**x) # TRAIN NETWORKS history = [0] * nets epochs = 20 for j in range(nets): X_train2, X_val2, Y_train2, Y_val2 = train_test_split( X_train, Y_train, test_size=0.1 ) history[j] = model[j].fit_generator( datagen.flow(X_train2, Y_train2, batch_size=66), epochs=epochs, steps_per_epoch=X_train2.shape[0] // 66, validation_data=(X_val2, Y_val2), callbacks=[annealer], verbose=0, ) print( "CNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format( j + 1, epochs, max(history[j].history["accuracy"]), max(history[j].history["val_accuracy"]), ) ) # ENSEMBLE PREDICTIONS AND SUBMIT results = np.zeros((X_test.shape[0], 10)) for j in range(nets): results = results + model[j].predict(X_test) results = np.argmax(results, axis=1) results = pd.Series(results, name="Label") submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1) submission.to_csv("submission_digit.csv", index=False)
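# A hedged variation on the plain sum of softmax outputs above (not part of the
# original ensembling step), assuming `model`, `history`, `nets`, and `X_test` are
# still in scope: weight each network by its best validation accuracy before
# taking the argmax.
import numpy as np

val_acc = np.array([max(h.history["val_accuracy"]) for h in history])
weights = val_acc / val_acc.sum()  # normalize so the weights sum to 1

weighted = np.zeros((X_test.shape[0], 10))
for j in range(nets):
    weighted += weights[j] * model[j].predict(X_test)
weighted_labels = np.argmax(weighted, axis=1)
print(weighted_labels[:10])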
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pycountry import plotly.express as px # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. df = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv") df.drop("Sno", axis=1, inplace=True) df.head() df.info() countries = {} for country in pycountry.countries: countries[country.name] = country.alpha_3 df["iso_alpha"] = df["Country"].map(countries.get) data = df.groupby("iso_alpha").sum().reset_index() fig_Confirmed = px.choropleth( data, locations="iso_alpha", color="Confirmed", hover_name="iso_alpha", color_continuous_scale=px.colors.sequential.Plasma, ) fig_Confirmed.show() fig_Deaths = px.choropleth( data, locations="iso_alpha", color="Deaths", hover_name="iso_alpha", color_continuous_scale=px.colors.sequential.Plasma, ) fig_Deaths.show() fig_Recovered = px.scatter_geo( data, locations="iso_alpha", color="Recovered", hover_name="iso_alpha", size="Recovered", ) fig_Recovered.show()
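# One caveat with the maps above: groupby("iso_alpha").sum() adds every daily
# snapshot together, which overstates cumulative counts. A minimal alternative
# sketch, assuming the file's "Date" column parses with pd.to_datetime and that
# every location is reported at the latest timestamp: keep only the most recent
# snapshot per country before plotting.
df["Date"] = pd.to_datetime(df["Date"])
latest = df[df["Date"] == df["Date"].max()]
latest_data = (
    latest.groupby("iso_alpha")[["Confirmed", "Deaths", "Recovered"]].sum().reset_index()
)

fig_latest = px.choropleth(
    latest_data,
    locations="iso_alpha",
    color="Confirmed",
    hover_name="iso_alpha",
    color_continuous_scale=px.colors.sequential.Plasma,
)
fig_latest.show()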
# # Titanic Competition
# This is the notebook of my first all-alone, no-tutorials Kaggle competition.
# It will be used for the study of data cleaning, exploratory analysis, data visualization, and machine learning models.
# Feel free to make any comments, suggestions and criticisms.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# ## Reading and separating Data
from sklearn.model_selection import train_test_split

# Reading Data
X = pd.read_csv("../input/titanic/train.csv")
X_test_full = pd.read_csv("../input/titanic/test.csv")

# Putting the target variable in 'y' and dropping that column from X
y = X["Survived"]
X = X.drop(["Survived"], axis=1)

# Breaking off X into train and validation datasets
X_train_full, X_valid_full, y_train, y_valid = train_test_split(
    X, y, train_size=0.8, test_size=0.2, random_state=0
)

# ## Preprocessing
# ### Exploring and treating missing data
# Here I check whether there are any missing values in the dataset, counting them by column to choose how to treat them.
# Number of missing values in each column of X_train_full
X_train_full.isnull().sum()
# Number of missing values in each column of X (train + valid)
X.isnull().sum()
# Now we know that only 3 features have missing values: 'Age' (age of the passenger), 'Cabin' (cabin number of the passenger) and 'Embarked' (port of embarkation).
# We will treat these 3 columns differently according to the type of data they represent (numerical or categorical) and to the number of missing values they have.
# 1. The **Cabin** data could be useful for finding a correlation between cabins and survival rate, since people in the cabins nearest to the rescue boats could have a higher chance of surviving (assuming that they were in the cabin when the ship started to sink). However, as a starting point, I chose to drop the Cabin column, since it has more than 77% of the data missing.
# 1. The NA values in the **Embarked** column will be treated with an imputer that fills in the most frequent value (which is S for Southampton, with 72% of the entries). Since there are only two missing values, the treatment will not have a huge impact on the model. Another possibility would be to erase those two entries from the dataset. Besides that, a further investigation of those two entries could possibly let us infer the missing values.
# 1. The NA values in the **Age** column are the most problematic, since it has a large number of missing values (177), but not a proportion huge enough for us to discard the column (177 out of 891 is approximately 20%). Moreover, based on common sense, age could play a huge role in survival rate, since children and elders could have more difficulty reaching the rescue boats or, on the other hand, be given preference when boarding. Having said that, the first treatment of the missing values will be imputation by the mean, bearing in mind that a more refined method would certainly bring much better results.
# ### Strategy to deal with categorical data
# The dataset has 5 categorical columns: 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'. The Cabin column was already ruled out by the amount of missing values it had. The question here is what strategy I will use to work with the other categorical columns. At first glance, and for the sake of simplicity in this first try, I will drop the 'Name' and 'Ticket' columns and use One-Hot Encoding for the rest.
# * The 'Name' column, based on common sense, probably wouldn't have a big impact on survival rate (although the title before the names could indicate some sort of socio-economic differentiation).
# * The 'Ticket' column has a very high cardinality (681 unique entries out of 891 samples) and can't be handled well by One-Hot Encoding, so I ruled it out as well in this first try.
# ### Preprocessor code
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

# Dropping columns from train and validation sets
drop_columns = ["Cabin", "Name", "Ticket"]
X_train = X_train_full.drop(drop_columns, axis=1)
X_valid = X_valid_full.drop(drop_columns, axis=1)

# Creating the transformers
numeric_transformer = SimpleImputer(strategy="mean")
categoric_transformer = Pipeline(
    steps=[
        ("imputer", SimpleImputer(strategy="most_frequent")),
        ("onehot", OneHotEncoder()),
    ]
)
categoric_cols = ["Sex", "Embarked"]

# Creating the pipeline with ColumnTransformer
preprocessor = ColumnTransformer(
    transformers=[
        ("imputer_numeric", numeric_transformer, ["Age"]),
        ("imputer_categoric", categoric_transformer, categoric_cols),
    ]
)

# Fitting the preprocessor before the ML model
# preprocessor.fit(X_train)
# X_train = preprocessor.transform(X_train)
# X_valid = preprocessor.transform(X_valid)

# ## Machine Learning Model
# On a first try, I will use the Logistic Regression model.
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import accuracy_score

# def get_accuracy(n_estimators, X_train, X_valid, y_train, y_valid):
#     model = RandomForestRegressor(n_estimators=n_estimators, random_state=0)
#     func_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', model)])
#     func_pipeline.fit(X_train, y_train)
#     preds_val = func_pipeline.predict(X_valid)
#     score = accuracy_score(y_valid, preds_val)
#     return score

# Defining Model
model = LogisticRegression()

# Bundle preprocessing and modeling code in a Pipeline
my_pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
my_pipeline.fit(X_train, y_train)
preds_val = my_pipeline.predict(X_valid)

# Accuracy of the prediction
accuracy_score(y_valid, preds_val)

# ## Output
X_test = X_test_full.drop(["Name", "Cabin", "Ticket"], axis=1)
preds_test = my_pipeline.predict(X_test)

# Save test predictions to file, using the PassengerId column (not the 0-based
# DataFrame index) so the ids line up with what the competition expects
output = pd.DataFrame(
    {"PassengerId": X_test_full["PassengerId"], "Survived": preds_test}
)
output.to_csv("submission.csv", index=False)
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import seaborn as sns import matplotlib.pyplot as plt data = pd.read_csv( "/kaggle/input/2019-coronavirus-dataset-01212020-01262020/2019_nC0v_20200121_20200126_cleaned.csv" ) data.head() data.describe() data["Country"] = data["Country"].replace("Mainland China", "China") data df_countries = ( data.groupby(["Country", "Date last updated"]) .sum() .sort_values("Suspected", ascending=False) ) df_countries
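# A small hedged follow-up, assuming `data` and its "Country" / "Suspected" columns
# from above: since the counts are cumulative snapshots, take each country's maximum
# reported value and plot the ten largest.
import matplotlib.pyplot as plt
import seaborn as sns

top10 = (
    data.groupby("Country")["Suspected"].max().sort_values(ascending=False).head(10)
)
plt.figure(figsize=(10, 6))
sns.barplot(x=top10.values, y=top10.index)
plt.xlabel("Suspected cases (max reported)")
plt.ylabel("Country")
plt.title("Top 10 countries by suspected cases")
plt.show()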
# # BERT with KFold # ## References # * https://www.kaggle.com/xhlulu/disaster-nlp-keras-bert-using-tfhub # * https://qiita.com/koshian2/items/81abfc0a75ea99f726b9 # We will use the official tokenization script created by the Google team import numpy as np import pandas as pd import tensorflow as tf from tensorflow.keras.layers import Dense, Input, LSTM, Bidirectional from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import Model, load_model from tensorflow.keras.callbacks import ModelCheckpoint import tensorflow_hub as hub import tokenization def bert_encode(texts, tokenizer, max_len=512): all_tokens = [] all_masks = [] all_segments = [] for text in texts: text = tokenizer.tokenize(text) text = text[: max_len - 2] input_sequence = ["[CLS]"] + text + ["[SEP]"] pad_len = max_len - len(input_sequence) tokens = tokenizer.convert_tokens_to_ids(input_sequence) tokens += [0] * pad_len pad_masks = [1] * len(input_sequence) + [0] * pad_len segment_ids = [0] * max_len all_tokens.append(tokens) all_masks.append(pad_masks) all_segments.append(segment_ids) return np.array(all_tokens), np.array(all_masks), np.array(all_segments) def build_model(bert_layer, max_len=512): def inner_build_model(): input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids") input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask") segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids") _, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids]) clf_output = Bidirectional(LSTM(128))(sequence_output) # clf_output = sequence_output[:, 0, :] out = Dense(1, activation="sigmoid")(clf_output) model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out) model.compile( Adam(lr=2e-6), loss="binary_crossentropy", metrics=["accuracy", "mse"] ) return model return inner_build_model module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1" # module_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1' bert_layer = hub.KerasLayer(module_url, trainable=True) vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy() do_lower_case = bert_layer.resolved_object.do_lower_case.numpy() tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case) train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv") test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv") submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv") train["token_len"] = train["text"].apply(lambda x: len(tokenizer.tokenize(x))) test["token_len"] = test["text"].apply(lambda x: len(tokenizer.tokenize(x))) token_max_len = max(train["token_len"].max(), test["token_len"].max()) + 2 display(token_max_len) from sklearn.model_selection import train_test_split, KFold, cross_val_score def get_kfold_sets(train, k=5): kf = KFold(n_splits=k, shuffle=True) for train_texts, train_labels in kf.split(train.text.values, train.target.values): train_texts, valid_texts, train_labels, valid_labels = train_test_split( train.text.values, train.target.values, test_size=0.2 ) train_input = bert_encode(train_texts, tokenizer, max_len=token_max_len) valid_input = bert_encode(valid_texts, tokenizer, max_len=token_max_len) yield train_input, train_labels, valid_input, valid_labels def get_train_sets(train): train_input = bert_encode(train.text.values, tokenizer, max_len=token_max_len) train_labels = train.target.values return train_input, train_labels test_input = bert_encode(test.text.values, tokenizer, 
max_len=token_max_len) from sklearn.metrics import f1_score from keras.callbacks import Callback class F1Callback(Callback): def __init__(self, model, X_val, y_val): self.model = model self.X_val = X_val self.y_val = y_val def on_epoch_end(self, epoch, logs): pred = self.model.predict(self.X_val) f1_val = f1_score(self.y_val, np.round(pred)) print("\n f1_val = ", f1_val) model_template = build_model(bert_layer, max_len=token_max_len)() model_template.summary() from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping from tensorflow.keras.models import clone_model def cross_val_score(train, k=3, epochs=10, batch_size=16): f1_vals = [] models = [] i = 0 for train_input, train_labels, valid_input, valid_labels in get_kfold_sets( train, k=k ): model = clone_model(model_template) model.compile( Adam(lr=2e-6), loss="binary_crossentropy", metrics=["accuracy", "mse"] ) train_history = model.fit( train_input, train_labels, validation_data=(valid_input, valid_labels), epochs=epochs, batch_size=batch_size, callbacks=[ EarlyStopping(patience=1, monitor="val_mse", mode="min", verbose=True) ], ) pred = model.predict(valid_input) f1_val = f1_score(valid_labels, np.round(pred)) print(f"f1-val: {f1_val}") f1_vals.append(f1_val) models.append(model) df = pd.DataFrame(train_history.history) df["f1-val"] = f1_val df.to_csv(f"history_{i}.csv") i += 1 return np.array(f1_vals).mean(), models k = 5 f1_val, models = cross_val_score(train, k=k) print(f"f1-mean: {f1_val}") train_input, train_labels = get_train_sets(train) def calc_best_threshold(pred, labels): f1_vals = [] ts = [] for t in np.arange(0.1, 1, 0.1): f1_val = f1_score(train_labels, [1 if p >= t else 0 for p in train_pred]) f1_vals.append(f1_val) ts.append(t) return ts[np.argmax(f1_vals)] best_ts = [] for model in models: train_pred = model.predict(train_input) tmp = calc_best_threshold(train_pred, train_labels) best_ts.append(tmp) print(f"best ts: {best_ts}") test_preds = [] for model in models: test_pred = model.predict(test_input) test_preds.append(test_pred) test_preds = np.array(test_preds) print(test_preds.shape) test_size = test_preds.shape[1] mean_pred = [] for s in range(test_size): tmp = [] for i in range(k): # tmp.append(test_preds[i][s][0].round()) tmp.append(1 if test_preds[i][s][0] >= best_ts[i] else 0) mean = np.mean(tmp) mean_pred.append(mean) mean_pred = np.array(mean_pred) print(mean_pred.shape) print(mean_pred[20:]) print(mean_pred[:20]) submission["target"] = mean_pred.round().astype(int) submission.to_csv("submission.csv", index=False)
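# Side note, as a hedged sketch: calc_best_threshold above reads the globals
# train_pred / train_labels instead of its own arguments, so it only works because
# those globals happen to be set. A version that actually uses its parameters
# (same threshold grid) would look like this:
def calc_best_threshold_fixed(pred, labels):
    f1_vals, ts = [], []
    for t in np.arange(0.1, 1, 0.1):
        f1_vals.append(f1_score(labels, [1 if p >= t else 0 for p in pred]))
        ts.append(t)
    return ts[int(np.argmax(f1_vals))]


# Example usage with the objects defined above (one threshold per fold model):
best_ts_fixed = [
    calc_best_threshold_fixed(m.predict(train_input), train_labels) for m in models
]
print(best_ts_fixed)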
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv("/kaggle/input/covid19-in-usa/us_counties_covid19_daily.csv")
df
# **Data Cleaning**
# Here we split the date into separate Day, Month and Year columns for easier analysis
df["Date"] = pd.to_datetime(df["date"])
df["Year"] = df["Date"].dt.year
df["Month"] = df["Date"].dt.month
df["Day"] = df["Date"].dt.day
df1 = df.drop("date", axis=1)
df1
# In this section we look at what the dataset contains: its columns and their datatypes.
df1.info()
df1.columns
df1["county"].unique()
df1["state"].unique()
df1["state"].shape
df1["Year"].unique()
df1["Month"].unique()
df1["Day"].unique()
df1.head(2)
sns.countplot(x="state", data=df1.head(100))
plt.show()
# From the graph we can see that California has the highest count of rows (about 42) in this sample
plt.figure(figsize=(10, 12))
plt.subplots_adjust(wspace=0.3)
plt.subplot(221)
sns.barplot(x="state", y="cases", data=df1.head(50))
plt.xticks(rotation=90)
plt.subplot(222)
sns.barplot(x="county", y="cases", data=df1.head(50))
plt.xticks(rotation=90)
# * From the graphs above we can conclude that, by county, San Francisco has the maximum number of cases (about 2) in this sample
# * From the graphs above we can conclude that, by state, the maximum number of deaths (about 3.5) is in Washington in this sample
df1["county"].unique()
plt.figure(figsize=(15, 8))
sns.barplot(x="county", y="deaths", data=df1.head(550))
plt.xticks(rotation=90)
# * From the graph above we can conclude that, by county, San Francisco has the maximum number of deaths (about 2) in this sample
plt.figure(figsize=(15, 10))
sns.barplot(x="state", y="deaths", data=df1)
plt.xticks(rotation=90)
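# The bar plots above only look at the first 50-550 rows; a hedged sketch that
# aggregates the whole table instead, assuming df1 and its "Date", "state" and
# "deaths" columns from above: total deaths per state on the most recent date.
latest = df1[df1["Date"] == df1["Date"].max()]
state_deaths = (
    latest.groupby("state")["deaths"].sum().sort_values(ascending=False).head(15)
)
plt.figure(figsize=(12, 6))
sns.barplot(x=state_deaths.index, y=state_deaths.values)
plt.xticks(rotation=90)
plt.ylabel("Deaths")
plt.title("Deaths by state on the latest date (top 15)")
plt.show()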
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # ### Loading Dataset # importing required libraraies. import pandas as pd import numpy as np test_df = pd.read_csv("/kaggle/input/titanic/test.csv") train_df = pd.read_csv("/kaggle/input/titanic/train.csv") # ### Exploring the dataset # train_df.head() train_df.describe() train_df.shape train_df.isnull() train_df.isnull().sum() missing_val_count = train_df.isnull().sum().sum() print(f"The number of missing values in the dataset is: {missing_val_count}") # > Dropping columns with missing values can make sense since there are over 50 % of NAN values in the columns # ### Cleaning the Data set # X_full = train_df.drop(columns=["Cabin"]) X_test = test_df.drop(columns=["Cabin"]) X_full.head() Age_interpolated = X_full["Age"].interpolate(method="linear") X_test_age = X_test["Age"].interpolate(method="linear") Age_interpolated final_df = X_full.replace(X_full["Age"], Age_interpolated) final_X_test = X_test.replace(X_test["Age"], X_test_age) final_df.tail() final_df.columns # ### creating target and features features_name = [ "PassengerId", "Pclass", "Name", "Sex", "Age", "SibSp", "Parch", "Ticket", "Fare", "Embarked", ] X = final_df[features_name] y = final_df.Survived # ### Dealing with categorical columns X_num_col = [col for col in X.columns if X[col].dtypes in ["int64", "float64"]] X_test_num_col = [ col for col in final_X_test.columns if final_X_test[col].dtypes in ["int64", "float64"] ] X_cat_col = [col for col in X.columns if X[col].dtypes == "object"] print(X_cat_col) X_test_cat_col = [ col for col in final_X_test.columns if final_X_test[col].dtypes == "object" ] from sklearn.preprocessing import OneHotEncoder
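# The notebook stops right after importing OneHotEncoder, so here is a minimal,
# hedged sketch of a possible next step, assuming X, final_X_test, X_num_col and
# X_test_num_col from above: one-hot encode the two low-cardinality categorical
# columns and keep the numeric ones ('Name' and 'Ticket' have far too many unique
# values for this treatment).
low_card_cols = ["Sex", "Embarked"]

encoder = OneHotEncoder(handle_unknown="ignore")
encoded_train = pd.DataFrame(
    encoder.fit_transform(X[low_card_cols].fillna("missing")).toarray(), index=X.index
)
encoded_test = pd.DataFrame(
    encoder.transform(final_X_test[low_card_cols].fillna("missing")).toarray(),
    index=final_X_test.index,
)

# keep numeric columns as-is and append the encoded categorical columns
X_encoded = pd.concat([X[X_num_col], encoded_train], axis=1)
X_test_encoded = pd.concat([final_X_test[X_test_num_col], encoded_test], axis=1)
print(X_encoded.shape, X_test_encoded.shape)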
# # NY Rental Properties Pricing # # Dataset is obtained from kaggle # https://www.kaggle.com/datasets/ivanchvez/ny-rental-properties-pricing import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df = pd.read_csv("/kaggle/input/ny-rental-properties-pricing/NY Realstate Pricing.csv") df df.shape df.head() df.columns df.info() df.describe() df.isnull().sum() df.isnull() df.isnull().sum().sum() len(df["F1"].unique()) df["room_type"].value_counts() df.drop(["F1", "id"], axis=1, inplace=True) df.shape for col in df.columns: unique_vals = df[col].unique() print(f"Unique values in column {col}: {unique_vals}") for column in df.columns: unique_values = df[column].nunique() value_counts = df[column].value_counts() print(f"Column name: {column}") print(f"Number of unique values: {unique_values}") print(f"Value counts:\n{value_counts}\n") df.duplicated().sum() # Histogram of price plt.figure(figsize=(10, 8)) sns.histplot(x="price", data=df, bins=50) plt.title("Histogram of Price") plt.show() # Bar chart of room type plt.figure(figsize=(10, 8)) sns.countplot(x="room_type", data=df) plt.title("Count of Room Types") plt.show() # Scatterplot of price vs minimum nights plt.figure(figsize=(10, 8)) sns.scatterplot(x="minimum_nights", y="price", data=df) plt.title("Price vs Minimum Nights") plt.show() # Correlation matrix corr = df.corr() plt.figure(figsize=(10, 8)) sns.heatmap(corr, annot=True, cmap="coolwarm") plt.title("Correlation Matrix") plt.show() from autoviz import data_cleaning_suggestions data_cleaning_suggestions(df) df.drop(["latitude", "longitude"], axis=1, inplace=True) for i in df.columns: if df[i].dtype == "object": df[i] = df[i].astype("category").cat.codes df.dtypes x = df.drop(columns=["price"]) y = df["price"] from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.2, random_state=42 ) from sklearn.linear_model import LinearRegression # Install the LinearRegression model to predict the cost linear = LinearRegression() # fit the model to the train data linear.fit(x_train, y_train) linear_pred = linear.predict(x_test) linear_pred acc_lr = linear.score(x_test, y_test) print(linear.score(x_test, y_test)) print(linear.score(x_train, y_train)) sns.distplot(df["price"], bins=70) plt.show() from sklearn.linear_model import LinearRegression, Ridge, Lasso from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import OneHotEncoder X = df.drop(columns=["price"]) y = df["price"] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) from sklearn.metrics import classification_report # Define the models models = { "Linear Regression": LinearRegression(), "Ridge Regression": Ridge(alpha=1), "Lasso Regression": Lasso(alpha=1), "Decision Tree Regression": DecisionTreeRegressor(max_depth=5), "Random Forest Regression": RandomForestRegressor(n_estimators=100, max_depth=5), } # Train and evaluate the models for name, model in models.items(): model.fit(X_train, y_train) y_pred = model.predict(X_test) # Evaluate the model r2 = r2_score(y_test, y_pred) mse = mean_squared_error(y_test, y_pred) mae = mean_absolute_error(y_test, y_pred) print("Model:", name) print("R^2:", r2) print("Mean Squared Error:", mse) print("Mean 
Absolute Error:", mae) # Plot the results plt.figure(figsize=(10, 6)) plt.title(f"{name} - Actual vs. Predicted") plt.scatter(y_test, y_pred) plt.xlabel("Actual") plt.ylabel("Predicted") plt.show()
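# A hedged wrap-up sketch, reusing the `models` dict and the train/test split from
# above: collect each model's R^2 into one small table so the comparison is easier
# to read than the per-model printouts.
from sklearn.metrics import r2_score

results = {}
for name, reg in models.items():
    reg.fit(X_train, y_train)
    results[name] = r2_score(y_test, reg.predict(X_test))

results_df = pd.DataFrame.from_dict(results, orient="index", columns=["R^2"]).sort_values(
    "R^2", ascending=False
)
print(results_df)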
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import cv2 import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os import keras from keras.preprocessing.image import * from keras.layers import * from keras.models import Sequential from keras.callbacks import * face_mask_detection_dir = "../input/face-mask-detection/images" with_without_mask_train = "../input/withwithout-mask/maskdata/maskdata/train" with_without_mask_test = "../input/withwithout-mask/maskdata/maskdata/test" with_mask_train_dir = os.path.join(with_without_mask_train, "with_mask") without_mask_train_dir = os.path.join(with_without_mask_train, "without_mask") with_mask_test_dir = os.path.join(with_without_mask_test, "with_mask") without_mask_test_dir = os.path.join(with_without_mask_test, "without_mask") categories = os.listdir(with_without_mask_train) labels = [i for i in range(len(categories))] label_dict = dict(zip(categories, labels)) # empty dictionary print(label_dict) print(categories) print(labels) img_size = 100 data = [] target = [] for category in categories: folder_path = os.path.join(with_without_mask_train, category) img_names = os.listdir(folder_path) for img_name in img_names: img_path = os.path.join(folder_path, img_name) img = cv2.imread(img_path) try: gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) resized = cv2.resize(gray, (img_size, img_size)) data.append(resized) target.append(label_dict[category]) except Exception as e: print("Exception:", e) data = np.array(data) / 255.0 data = np.reshape(data, (data.shape[0], img_size, img_size, 1)) target = np.array(target) from keras.utils import np_utils new_target = np_utils.to_categorical(target) np.save("data", data) np.save("target", new_target) data = np.load("data.npy") target = np.load("target.npy") from keras.models import Sequential from keras.layers import Dense, Activation, Flatten, Dropout from keras.layers import Conv2D, MaxPooling2D from keras.callbacks import ModelCheckpoint model = Sequential() model.add(Conv2D(200, (3, 3), input_shape=data.shape[1:])) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) # The first CNN layer followed by Relu and MaxPooling layers model.add(Conv2D(100, (3, 3))) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) # The second convolution layer followed by Relu and MaxPooling layers model.add(Flatten()) model.add(Dropout(0.5)) # Flatten layer to stack the output convolutions from second convolution layer model.add(Dense(50, activation="relu")) # Dense layer of 64 neurons model.add(Dense(2, activation="softmax")) # The Final layer with two outputs for two categories model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) checkpoint = ModelCheckpoint( "model-{epoch:03d}.model", monitor="val_loss", verbose=0, save_best_only=True, mode="auto", ) history = model.fit( data, target, 
epochs=20, callbacks=[checkpoint], validation_split=0.2 ) plt.plot(history.history["accuracy"], "r", label="training accuracy") plt.plot(history.history["val_accuracy"], label="validation accuracy") plt.xlabel("# epochs") plt.ylabel("loss") plt.legend() plt.show() categories = os.listdir(with_without_mask_test) labels = [i for i in range(len(categories))] label_dict = dict(zip(categories, labels)) # empty dictionary print(label_dict) print(categories) print(labels) img_size = 100 data_test = [] target_test = [] for category in categories: folder_path = os.path.join(with_without_mask_test, category) img_names = os.listdir(folder_path) for img_name in img_names: img_path = os.path.join(folder_path, img_name) img = cv2.imread(img_path) try: gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) resized = cv2.resize(gray, (img_size, img_size)) data_test.append(resized) target_test.append(label_dict[category]) except Exception as e: print("Exception:", e) data_test = np.array(data_test) / 255.0 data_test = np.reshape(data_test, (data_test.shape[0], img_size, img_size, 1)) target_test = np.array(target_test) from keras.utils import np_utils new_target_test = np_utils.to_categorical(target_test) np.save("data_test", data_test) np.save("target_test", new_target_test) data_test = np.load("data.npy") target_test = np.load("target.npy") print(model.evaluate(data_test, target_test)) # As you can see the model has approximately 97% accuracy! from keras.models import load_model import cv2 import numpy as np # model = load_model('model-017.model') face_clsfr = cv2.CascadeClassifier("haarcascade_frontalface_default.xml") source = cv2.VideoCapture(2) labels_dict = {0: "MASK", 1: "NO MASK"} color_dict = {0: (0, 255, 0), 1: (0, 0, 255)} while True: ret, img = source.read() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_clsfr.detectMultiScale(gray, 1.3, 5) for x, y, w, h in faces: face_img = gray[y : y + w, x : x + w] resized = cv2.resize(face_img, (100, 100)) normalized = resized / 255.0 reshaped = np.reshape(normalized, (1, 100, 100, 1)) result = model.predict(reshaped) label = np.argmax(result, axis=1)[0] cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2) cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1) cv2.putText( img, labels_dict[label], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2, ) cv2.imshow("LIVE", img) key = cv2.waitKey(1) if key == 27: break cv2.destroyAllWindows() source.release()
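# Kaggle kernels have no webcam, so the cv2.VideoCapture loop above cannot run there.
# A hedged alternative sketch, assuming `model`, `categories`, `label_dict` and the
# test directory paths defined earlier: run the trained network on a single image
# from the test set instead.
sample_category = categories[0]
sample_dir = os.path.join(with_without_mask_test, sample_category)
sample_path = os.path.join(sample_dir, os.listdir(sample_dir)[0])

sample = cv2.imread(sample_path)
sample_gray = cv2.cvtColor(sample, cv2.COLOR_BGR2GRAY)
sample_input = cv2.resize(sample_gray, (100, 100)).reshape(1, 100, 100, 1) / 255.0

pred = model.predict(sample_input)
pred_label = int(np.argmax(pred, axis=1)[0])
reverse_label_dict = {v: k for k, v in label_dict.items()}  # index -> folder name
print("True folder:", sample_category, "| predicted:", reverse_label_dict[pred_label])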
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import torch import torchvision import torchvision.models as models import torchvision.datasets as datasets import torchvision.transforms as T import torch.nn as nn import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader, TensorDataset, random_split from torchvision.models.segmentation.deeplabv3 import DeepLabHead import matplotlib.pyplot as plt import seaborn as sn # Support libraries import numpy as np import pandas as pd import itertools import os import random from PIL import Image class ParticleSeg(Dataset): def __init__( self, root_dir: str, img_transforms: object = None, mask_transforms: object = None, ): super().__init__() self.root_dir = root_dir self.transforms_mask = mask_transforms self.transforms_img = img_transforms self.mask_dir = os.path.join(root_dir, "segmaps") self.img_dir = os.path.join(root_dir, "images") self.file_names = os.listdir(self.mask_dir) self.mask_paths = [ os.path.join(self.mask_dir, mask_name) for mask_name in self.file_names ] self.img_paths = [ os.path.join(self.img_dir, img_name) for img_name in self.file_names ] def __len__(self): return len(self.file_names) def __getitem__(self, ix: int): seed = np.random.randint(2022) random.seed(seed) torch.manual_seed(seed) mask_path, img_path = self.mask_paths[ix], self.img_paths[ix] mask, img = Image.open(mask_path), Image.open(img_path).convert("RGB") if self.transforms_img is not None: img = self.transforms_img(img) random.seed(seed) torch.manual_seed(seed) if self.transforms_mask is not None: mask = self.transforms_mask(mask) mask, img = mask.float(), img.float() mask[mask > 0] = 1 # img = img / 255 return img, mask img_transforms = T.Compose( [ T.ToTensor(), T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), T.Resize((244, 244)), ] ) mask_transforms = T.Compose([T.ToTensor(), T.Resize((244, 244))]) dataset = ParticleSeg( root_dir="/kaggle/input/electron-microscopy-particle-segmentation", img_transforms=img_transforms, mask_transforms=mask_transforms, ) mask, img = dataset[10] fig, ax = plt.subplots(nrows=1, ncols=2) pos1 = ax[0].imshow(img.permute((1, 2, 0))) pos2 = ax[1].imshow(mask.permute((1, 2, 0))) dataset_train, dataset_val = random_split( dataset, [int(0.8 * len(dataset)), len(dataset) - int(0.8 * len(dataset))] ) print(len(dataset_train), len(dataset_val)) dataloader_train = DataLoader(dataset_train, batch_size=16) dataloader_val = DataLoader(dataset_val, batch_size=16) def createDeepLabv3(outputchannels=1): model = models.segmentation.deeplabv3_resnet50(pretrained=True, progress=True) model.classifier = DeepLabHead(2048, outputchannels) return model device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = createDeepLabv3(1).to(device) criterion = nn.BCEWithLogitsLoss() optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) epochs = 50 def 
train(model, dataloader): model.train() train_loss = 0 for data in dataloader: optimizer.zero_grad() images, masks = data[0].to(device), data[1].to(device) out = model(images)["out"] loss = criterion(out, masks) train_loss += loss.item() loss.backward() optimizer.step() return train_loss / len(dataloader) def test(model, dataloader): model.eval() test_loss = 0 for data in dataloader: images, masks = data[0].to(device), data[1].to(device) out = model(images)["out"] loss = criterion(out, masks) test_loss += loss.item() return test_loss / len(dataloader) for epoch in range(epochs): train_loss = train(model, dataloader_train) test_loss = test(model, dataloader_val) print( "Epoch [{}/{}], Train loss: {:.4f}, Val loss {:.4f}".format( epoch + 1, epochs, train_loss, test_loss ) ) dataiter = iter(dataloader_train) images, masks = next(dataiter) images = images.to(device) output = model(images)["out"] plt.imshow(images[15].cpu().permute((1, 2, 0))) plt.show() plt.imshow(output[15].cpu().detach().permute((1, 2, 0)))
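# The tensors plotted above are raw logits (training uses BCEWithLogitsLoss), so a
# hedged sketch of turning one prediction into a displayable binary mask, assuming
# `output` from the previous cell is still in scope:
with torch.no_grad():
    probs = torch.sigmoid(output[15]).squeeze(0).cpu()  # logits -> probabilities
pred_mask = (probs > 0.5).float()  # threshold at 0.5

plt.imshow(pred_mask, cmap="gray")
plt.title("Thresholded predicted mask")
plt.show()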
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. digit_recon_tran_csv = pd.read_csv( "/kaggle/input/digit-recognizer/train.csv", dtype=np.float32 ) digit_recon_test_csv = pd.read_csv( "/kaggle/input/digit-recognizer/test.csv", dtype=np.float32 ) print("tran dataset size: ", digit_recon_tran_csv.size, "\n") print("test dataset size: ", digit_recon_test_csv.size, "\n") # print(digit_recon_tran_csv.head(1)) # print(digit_recon_tran_csv.head(1).label) tran_label = digit_recon_tran_csv.label.values tran_image = ( digit_recon_tran_csv.loc[:, digit_recon_tran_csv.columns != "label"].values / 255 ) # normalization test_image = digit_recon_test_csv.values / 255 print("train label size: ", tran_label.shape) print("train image size: ", tran_image.shape) print("test image size: ", test_image.shape) from sklearn.model_selection import train_test_split train_image, valid_image, train_label, valid_label = train_test_split( tran_image, tran_label, test_size=0.2, random_state=42 ) # print("train size: ", train_image.shape) print("valid size: ", valid_image.shape) import torch import torch.nn as nn import numpy as np import torchvision from torch.utils.data import DataLoader, Dataset from torchvision import transforms print(torch.__version__) class MNIST_data(Dataset): """MNIST dtaa set""" def __init__( self, data, transform=transforms.Compose( [ transforms.ToPILImage(), transforms.RandomAffine(30, (0.1, 0.1)), transforms.ToTensor(), ] ), ): if len(data) == 1: # test data self.X = data[0].reshape(-1, 28, 28) self.y = None else: # training data self.X = data[0].reshape(-1, 28, 28) self.y = data[1].astype(np.long) self.transform = transform def __len__(self): return len(self.X) def __getitem__(self, idx): if self.y is not None: return self.transform(self.X[idx]), self.y[idx] else: return self.transform(self.X[idx]) # test mnist dataset import matplotlib.pyplot as plt test_mnist_data = MNIST_data((train_image, train_label)) test_mnist_loader = torch.utils.data.DataLoader( dataset=test_mnist_data, batch_size=1, shuffle=True ) for batch_idx, (images, labels) in enumerate(test_mnist_loader): plt.imshow(images.view(28, 28).numpy()) plt.axis("off") plt.title(str(labels.numpy())) plt.show() break # visual import matplotlib.pyplot as plt plt.imshow(test_image[10].reshape(28, 28)) plt.axis("off") plt.show() batch_size = 64 # 2^5=64 train_dataset = MNIST_data((train_image, train_label)) train_loader = torch.utils.data.DataLoader( dataset=train_dataset, batch_size=batch_size, shuffle=True ) valid_dataset = MNIST_data((valid_image, valid_label)) valid_loader = torch.utils.data.DataLoader( dataset=valid_dataset, batch_size=batch_size, shuffle=False ) class YANNet(nn.Module): def __init__(self): super(YANNet, self).__init__() self.conv = nn.Sequential( # size: 28*28 nn.Conv2d( 1, 8, 3, 1, 1 ), # in_channels out_channels kernel_size stride padding nn.ReLU(), nn.Conv2d(8, 16, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(2), # size: 14*14 nn.Conv2d(16, 16, 3, 1, 1), nn.ReLU(), nn.Conv2d(16, 8, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(2), ) self.fc = nn.Sequential( # size: 7*7 nn.Linear(8 * 7 * 7, 256), nn.ReLU(), 
nn.Dropout(0.5), nn.Linear(256, 256), nn.ReLU(), nn.Dropout(0.5), nn.Linear(256, 10), ) def forward(self, img): x = self.conv(img) o = self.fc(x.view(x.shape[0], -1)) return o model = YANNet() error = nn.CrossEntropyLoss() if torch.cuda.is_available(): model = model.cuda() error = error.cuda() optim = torch.optim.SGD(model.parameters(), lr=0.1) # exp_lr_scheduler = lr_scheduler.StepLR(optim, step_size=10, gamma=0.1) num_epoc = 10 from torch.autograd import Variable for epoch in range(num_epoc): epoc_train_loss = 0.0 epoc_train_corr = 0.0 epoc_valid_corr = 0.0 print("Epoch:{}/{}".format(epoch, num_epoc)) model.train() # exp_lr_scheduler.step() for batch_idx, (images, labels) in enumerate(train_loader): if torch.cuda.is_available(): images = images.cuda() labels = labels.cuda() images = Variable(images) labels = Variable(labels) outputs = model(images) optim.zero_grad() loss = error(outputs, labels) loss.backward() optim.step() epoc_train_loss += loss.data outputs = torch.max(outputs.data, 1)[1] epoc_train_corr += torch.sum(outputs == labels.data) with torch.no_grad(): model.eval() for batch_idx, (images, labels) in enumerate(valid_loader): if torch.cuda.is_available(): images = images.cuda() labels = labels.cuda() images = Variable(images) labels = Variable(labels) outputs = model(images) outputs = torch.max(outputs.data, 1)[1] epoc_valid_corr += torch.sum(outputs == labels.data) print( "loss is :{:.4f},Train Accuracy is:{:.4f}%,Test Accuracy is:{:.4f}%".format( epoc_train_loss / len(train_dataset), 100 * epoc_train_corr / len(train_dataset), 100 * epoc_valid_corr / len(valid_dataset), ) ) model = model.cpu() model.eval() plt.imshow(test_image[100].reshape(28, 28)) plt.axis("off") plt.show() one_test = test_image[100] one_test = torch.from_numpy(one_test).view(1, 1, 28, 28) one_output = model(one_test) print(torch.max(one_output.data, 1)[1].numpy()) digit_recon_submission_csv = pd.read_csv( "/kaggle/input/digit-recognizer/sample_submission.csv", dtype=np.float32 ) print(digit_recon_submission_csv.head(10)) print(test_image.shape) test_results = np.zeros((test_image.shape[0], 2), dtype="int32") print(test_results.shape) for i in range(test_image.shape[0]): one_image = torch.from_numpy(test_image[i]).view(1, 1, 28, 28) one_output = model(one_image) test_results[i, 0] = i + 1 test_results[i, 1] = torch.max(one_output.data, 1)[1].numpy() print(test_results.shape) Data = {"ImageId": test_results[:, 0], "Label": test_results[:, 1]} DataFrame = pd.DataFrame(Data) DataFrame.to_csv("submission.csv", index=False, sep=",")
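# The loop above pushes the test images through the network one at a time; a hedged
# batched sketch, assuming `model` (already moved to CPU and in eval mode) and
# `test_image` from above, produces the same labels with far fewer forward passes.
test_tensor = torch.from_numpy(test_image).view(-1, 1, 28, 28)
batched_preds = []
with torch.no_grad():
    for start in range(0, test_tensor.shape[0], 256):
        batch = test_tensor[start : start + 256]
        batched_preds.append(torch.max(model(batch), 1)[1])
batched_preds = torch.cat(batched_preds).numpy()
print(batched_preds.shape)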
#
# # Table of Contents
# 1. [Introduction](#introduction)
# 1. [Install libraries and packages](#install_libraries_and_packages)
# 1. [Import libraries](#import_libraries)
# 1. [Configure hyper-parameters](#configure_hyper_parameters)
# 1. [Define useful classes](#define_useful_classes)
# 1. [Start the inference process](#start_the_inference_process)
# 1. [Save the submission](#save_the_submission)
# 1. [Conclusion](#conclusion)
# # Introduction
# So, I have successfully trained a classifier using a bunch of datasets listed in the [*Other useful datasets*](https://www.kaggle.com/c/deepfake-detection-challenge/discussion/128954) discussion and uploaded it into Kaggle as an external [*dataset*](https://www.kaggle.com/phunghieu/dfdcmultifacef5-resnet18). Now, let's use this model to infer all videos in the test set and then complete this end-to-end solution by submitting the final result to the host :) .
# If you do not know how to train the classifier, please follow this [*link*](https://www.kaggle.com/phunghieu/dfdc-multiface-training).
# ---
# ## Multiface's general diagram
# (The original cell embeds the diagram as an inline SVG data URI; only its readable text is kept here.)
# Video (.mp4) -> Frames -> Face Detector (MTCNN) -> Faces -> ... -> REAL / FAKE
2http%3A%2F%2Fwww.w3.org%2F1999%2Fxhtml%22%20style%3D%22display%3A%20inline-block%3B%20font-size%3A%2012px%3B%20font-family%3A%20Helvetica%3B%20color%3A%20rgb%280%2C%200%2C%200%29%3B%20line-height%3A%201.2%3B%20vertical-align%3A%20top%3B%20width%3A%20124px%3B%20white-space%3A%20nowrap%3B%20overflow-wrap%3A%20normal%3B%20text-align%3A%20center%3B%22%3E%3Cdiv%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxhtml%22%20style%3D%22display%3Ainline-block%3Btext-align%3Ainherit%3Btext-decoration%3Ainherit%3Bwhite-space%3Anormal%3B%22%3EClassifier%3Cbr%20%2F%3E%28Deep%20Neural%20Network%29%3C%2Fdiv%3E%3C%2Fdiv%3E%3C%2FforeignObject%3E%3Ctext%20x%3D%2261%22%20y%3D%2219%22%20fill%3D%22%23000000%22%20text-anchor%3D%22middle%22%20font-size%3D%2212px%22%20font-family%3D%22Helvetica%22%3EClassifier%26lt%3Bbr%26gt%3B%28Deep%20Neural%20Network%29%3C%2Ftext%3E%3C%2Fswitch%3E%3C%2Fg%3E%3Crect%20x%3D%221140%22%20y%3D%22150%22%20width%3D%2280%22%20height%3D%2240%22%20fill%3D%22none%22%20stroke%3D%22none%22%20pointer-events%3D%22all%22%2F%3E%3Cg%20transform%3D%22translate%281139.5%2C157.5%29scale%282%29%22%3E%3Cswitch%3E%3CforeignObject%20style%3D%22overflow%3Avisible%3B%22%20pointer-events%3D%22all%22%20width%3D%2240%22%20height%3D%2212%22%20requiredFeatures%3D%22http%3A%2F%2Fwww.w3.org%2FTR%2FSVG11%2Ffeature%23Extensibility%22%3E%3Cdiv%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxhtml%22%20style%3D%22display%3A%20inline-block%3B%20font-size%3A%2012px%3B%20font-family%3A%20Helvetica%3B%20color%3A%20rgb%280%2C%200%2C%200%29%3B%20line-height%3A%201.2%3B%20vertical-align%3A%20top%3B%20width%3A%2042px%3B%20white-space%3A%20nowrap%3B%20overflow-wrap%3A%20normal%3B%20text-align%3A%20center%3B%22%3E%3Cdiv%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxhtml%22%20style%3D%22display%3Ainline-block%3Btext-align%3Ainherit%3Btext-decoration%3Ainherit%3Bwhite-space%3Anormal%3B%22%3ESample%3C%2Fdiv%3E%3C%2Fdiv%3E%3C%2FforeignObject%3E%3Ctext%20x%3D%2220%22%20y%3D%2212%22%20fill%3D%22%23000000%22%20text-anchor%3D%22middle%22%20font-size%3D%2212px%22%20font-family%3D%22Helvetica%22%3ESample%3C%2Ftext%3E%3C%2Fswitch%3E%3C%2Fg%3E%3Crect%20x%3D%22930%22%20y%3D%22260%22%20width%3D%2280%22%20height%3D%2240%22%20fill%3D%22none%22%20stroke%3D%22none%22%20pointer-events%3D%22all%22%2F%3E%3Cg%20transform%3D%22translate%28939.5%2C267.5%29scale%282%29%22%3E%3Cswitch%3E%3CforeignObject%20style%3D%22overflow%3Avisible%3B%22%20pointer-events%3D%22all%22%20width%3D%2230%22%20height%3D%2212%22%20requiredFeatures%3D%22http%3A%2F%2Fwww.w3.org%2FTR%2FSVG11%2Ffeature%23Extensibility%22%3E%3Cdiv%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxhtml%22%20style%3D%22display%3A%20inline-block%3B%20font-size%3A%2012px%3B%20font-family%3A%20Helvetica%3B%20color%3A%20rgb%280%2C%200%2C%200%29%3B%20line-height%3A%201.2%3B%20vertical-align%3A%20top%3B%20width%3A%2032px%3B%20white-space%3A%20nowrap%3B%20overflow-wrap%3A%20normal%3B%20text-align%3A%20center%3B%22%3E%3Cdiv%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxhtml%22%20style%3D%22display%3Ainline-block%3Btext-align%3Ainherit%3Btext-decoration%3Ainherit%3Bwhite-space%3Anormal%3B%22%3EStack%3C%2Fdiv%3E%3C%2Fdiv%3E%3C%2FforeignObject%3E%3Ctext%20x%3D%2215%22%20y%3D%2212%22%20fill%3D%22%23000000%22%20text-anchor%3D%22middle%22%20font-size%3D%2212px%22%20font-family%3D%22Helvetica%22%3EStack%3C%2Ftext%3E%3C%2Fswitch%3E%3C%2Fg%3E%3Cpath%20d%3D%22M%201260%20240%20L%201250%20240%20Q%201240%20240%201240%20260%20L%201240%20280%20Q%201240%20300%201230%20300%20L%201225%20300%20Q%201220%20300%201230%20300%20L%2012
35%20300%20Q%201240%20300%201240%20320%20L%201240%20340%20Q%201240%20360%201250%20360%20L%201260%20360%22%20fill%3D%22none%22%20stroke%3D%22%23000000%22%20stroke-width%3D%222%22%20stroke-miterlimit%3D%2210%22%20transform%3D%22rotate%28-180%2C1240%2C300%29%22%20pointer-events%3D%22all%22%2F%3E%3Crect%20x%3D%221260%22%20y%3D%22280%22%20width%3D%22100%22%20height%3D%2240%22%20fill%3D%22none%22%20stroke%3D%22none%22%20pointer-events%3D%22all%22%2F%3E%3Cg%20transform%3D%22translate%281271.5%2C287.5%29scale%282%29%22%3E%3Cswitch%3E%3CforeignObject%20style%3D%22overflow%3Avisible%3B%22%20pointer-events%3D%22all%22%20width%3D%2238%22%20height%3D%2212%22%20requiredFeatures%3D%22http%3A%2F%2Fwww.w3.org%2FTR%2FSVG11%2Ffeature%23Extensibility%22%3E%3Cdiv%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxhtml%22%20style%3D%22display%3A%20inline-block%3B%20font-size%3A%2012px%3B%20font-family%3A%20Helvetica%3B%20color%3A%20rgb%280%2C%200%2C%200%29%3B%20line-height%3A%201.2%3B%20vertical-align%3A%20top%3B%20width%3A%2040px%3B%20white-space%3A%20nowrap%3B%20overflow-wrap%3A%20normal%3B%20text-align%3A%20center%3B%22%3E%3Cdiv%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxhtml%22%20style%3D%22display%3Ainline-block%3Btext-align%3Ainherit%3Btext-decoration%3Ainherit%3Bwhite-space%3Anormal%3B%22%3En%20faces%3C%2Fdiv%3E%3C%2Fdiv%3E%3C%2FforeignObject%3E%3Ctext%20x%3D%2219%22%20y%3D%2212%22%20fill%3D%22%23000000%22%20text-anchor%3D%22middle%22%20font-size%3D%2212px%22%20font-family%3D%22Helvetica%22%3En%20faces%3C%2Ftext%3E%3C%2Fswitch%3E%3C%2Fg%3E%3C%2Fg%3E%3C%2Fsvg%3E) # --- # ## Implementation # I will loop through all test videos and try to get face images by using the same strategy as I have applied to the validation process in the [*DFDC-Multiface-Training*](https://www.kaggle.com/phunghieu/dfdc-multiface-training) kernel. The only difference is instead of having well-prepared data, I must run a face-detector, the same as I used to prepare the training dataset in the [*Data Preparation*](https://www.kaggle.com/phunghieu/deepfake-detection-face-extractor) kernel, to directly extract faces from each frame of one input video. # If I fail to get enough faces from a video, I will mark it as `invalid` and assign a `default predicted value` (probability) to this video near the end of the notebook. # --- # ## Pipeline # This end-to-end solution includes 3 steps: # 1. [*Data Preparation*](https://www.kaggle.com/phunghieu/deepfake-detection-face-extractor) # 1. [*Training*](https://www.kaggle.com/phunghieu/dfdc-multiface-training) # 1. 
*Inference* <- **you're here** # [Back to Table of Contents](#toc) # # Install libraries and packages # [Back to Table of Contents](#toc) # Install facenet-pytorch from facenet_pytorch.models.inception_resnet_v1 import get_torch_home torch_home = get_torch_home() # Copy model checkpoints to torch cache so they are loaded automatically by the package # # # Import libraries # [Back to Table of Contents](#toc) import torch from torch import nn from torch.utils.data import Dataset, DataLoader from torchvision.models import resnet18 from facenet_pytorch import MTCNN from albumentations import Normalize, Compose import numpy as np import pandas as pd import matplotlib.pyplot as plt import cv2 from PIL import Image from tqdm.notebook import tqdm import os import glob import multiprocessing as mp if torch.cuda.is_available(): device = "cuda:0" torch.set_default_tensor_type("torch.cuda.FloatTensor") else: device = "cpu" print(f"Running on device: {device}") # # # Configure hyper-parameters # [Back to Table of Contents](#toc) TEST_DIR = "/kaggle/input/deepfake-detection-challenge/test_videos/" MODEL_PATH = "/kaggle/input/dfdcmultifacef5-resnet18/f5_resnet18.pth" N_FACES = 5 BATCH_SIZE = 64 NUM_WORKERS = mp.cpu_count() FRAME_SCALE = 0.25 FACE_BATCH_SHAPE = (N_FACES * 3, 160, 160) DEFAULT_PROB = 0.5 # # # Define useful classes # [Back to Table of Contents](#toc) class DeepfakeClassifier(nn.Module): def __init__(self, encoder, in_channels=3, num_classes=1): super(DeepfakeClassifier, self).__init__() self.encoder = encoder # Modify input layer. self.encoder.conv1 = nn.Conv2d( in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False ) # Modify output layer. self.encoder.fc = nn.Linear(512 * 1, num_classes) def forward(self, x): return torch.sigmoid(self.encoder(x)) def freeze_all_layers(self): for param in self.encoder.parameters(): param.requires_grad = False def freeze_middle_layers(self): self.freeze_all_layers() for param in self.encoder.conv1.parameters(): param.requires_grad = True for param in self.encoder.fc.parameters(): param.requires_grad = True def unfreeze_all_layers(self): for param in self.encoder.parameters(): param.requires_grad = True class TestVideoDataset(Dataset): def __init__( self, test_dir, frame_resize=None, face_detector=None, n_faces=1, preprocess=None, ): self.test_dir = test_dir self.test_video_paths = glob.glob(os.path.join(self.test_dir, "*.mp4")) self.face_detector = face_detector self.n_faces = n_faces self.frame_resize = frame_resize self.preprocess = preprocess def __len__(self): return len(self.test_video_paths) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() test_video_path = self.test_video_paths[idx] test_video = test_video_path.split("/")[-1] # Get faces until enough (try limit: n_faces) faces = [] for i in range(self.n_faces): # Create video reader and find length v_cap = cv2.VideoCapture(test_video_path) v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT)) stride = int(v_len / (self.n_faces**2)) sample = np.linspace( i * stride, (v_len - 1) + i * stride, self.n_faces ).astype(int) frames = [] # Get frames for j in range(v_len): success = v_cap.grab() if j in sample: success, frame = v_cap.retrieve() if not success: continue frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = Image.fromarray(frame) # Resize frame to desired size if self.frame_resize is not None: frame = frame.resize( [int(d * self.frame_resize) for d in frame.size] ) frames.append(frame) if len(frames) > 0: all_faces_in_frames = [ detected_face for detected_faces in 
self.face_detector(frames) if detected_faces is not None for detected_face in detected_faces ] faces.extend(all_faces_in_frames) if len(faces) >= self.n_faces: # Get enough faces break v_cap.release() if len(faces) >= self.n_faces: # Get enough faces faces = faces[: self.n_faces] # Get top if self.preprocess is not None: for j in range(len(faces)): augmented = self.preprocess( image=faces[j].cpu().detach().numpy().transpose(1, 2, 0) ) faces[j] = augmented["image"] faces = np.concatenate(faces, axis=-1).transpose(2, 0, 1) return {"video_name": test_video, "faces": faces, "is_valid": True} else: return { "video_name": test_video, "faces": np.zeros(FACE_BATCH_SHAPE, dtype=np.float32), "is_valid": False, # Those invalid videos will get DEFAULT_PROB } # # # Start the inference process # [Back to Table of Contents](#toc) # Load face detector. face_detector = MTCNN( margin=14, keep_all=True, factor=0.5, post_process=False, device=device ).eval() encoder = resnet18(pretrained=False) classifier = DeepfakeClassifier(encoder=encoder, in_channels=3 * N_FACES, num_classes=1) classifier.to(device) state = torch.load(MODEL_PATH, map_location=lambda storage, loc: storage) classifier.load_state_dict(state["state_dict"]) classifier.eval() preprocess = Compose( [Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], p=1)] ) test_dataset = TestVideoDataset( TEST_DIR, frame_resize=FRAME_SCALE, face_detector=face_detector, n_faces=N_FACES, preprocess=preprocess, ) test_dataloader = DataLoader( test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS ) submission = [] with torch.no_grad(): try: for videos in tqdm(test_dataloader): y_pred = classifier(videos["faces"]).squeeze(dim=-1).cpu().detach().numpy() submission.extend( list( zip( videos["video_name"], y_pred, videos["is_valid"].cpu().detach().numpy(), ) ) ) except Exception as e: print(e) submission = pd.DataFrame(submission, columns=["filename", "label", "is_valid"]) submission.sort_values("filename", inplace=True) submission.loc[submission.is_valid == False, "label"] = DEFAULT_PROB # # # Save the submission # [Back to Table of Contents](#toc) submission[["filename", "label"]].to_csv("submission.csv", index=False) plt.hist(submission.label, 20) plt.show()
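# A quick, standalone sanity check (illustrative only, not part of the submission pipeline) of the
# frame-sampling logic used in TestVideoDataset.__getitem__ above: each retry pass i shifts the
# sampled frame indices by i * stride, so later passes look at slightly different frames. The
# numbers below are hypothetical (a 300-frame video with 5 faces requested).
v_len_demo = 300
n_faces_demo = 5
stride_demo = int(v_len_demo / (n_faces_demo**2))
for i_demo in range(n_faces_demo):
    sample_demo = np.linspace(
        i_demo * stride_demo, (v_len_demo - 1) + i_demo * stride_demo, n_faces_demo
    ).astype(int)
    # Indices beyond v_len_demo - 1 are simply never reached by the frame loop,
    # so later passes effectively sample fewer frames.
    print(i_demo, sample_demo)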
# ## Loading Libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns init_notebook_mode(connected=True) plt.style.use("ggplot") from collections import Counter from wordcloud import WordCloud from PIL import Image import urllib.request import random from sklearn.preprocessing import StandardScaler # # Data Preprocessing # Loading the dataset and gathering a glimpse: import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) df = pd.read_csv("../input/new-york-city-current-job-postings/nyc-jobs.csv") df.head() df.info() # #### Columns Description: # - **Job ID**: The Unique Job ID for each opening # - **Posting Type**: The opening type, whether internal or external, for the job. # - **# of Positions**: The number of positions available for a certain opening # - **Business Title**: The position the candidate would hold. # - **Civil Service Title**: The Broad Title the position would be classified under # - **Title Code No**: The Code for a particular title # - **Level**: The authority the certain opening would bring with it # - **Job Category**: Broad Classification of where all the jobs would fall in # - **Full-time/Part-Time**: Time frame of a job. # - **Salary Range From**: The beginning salary cap for that particular opening # - **Salary Range To**: The highest cap for that particular job opening. # - **Salary Frequency**: The payment factor for the job, hourly or annual # - **Work Location**: The location of the workplace # - **Division/Work Unit**: Broad working units for all the jobs # - **Job Description**: A brief idea of what the job will contain # - **Minimum Qual Requirements**: The minimum qualifications a candidate must possess for the job # - **Preferred Skills**: Optimal skills which the posting is looking for # - **Additional Information**: Any additional information provided with the job opening # - **Hours/Shift**: The timings for the job # - **Work Location 1**: Additional information for the work location # - **Recruitment Contact**: Empty field, supposed to contain numbers # - **Residency Requirement**: Whether the employee must be a resident of NYC. # - **Posting date**: When the opening was announced. # - **Post Until**: The closing date. # - **Posting Updated**: The time when the posting was updated for the opening. # - **Process Date**: When the posting process was completed # Phew! That was a lot of columns, well then, let's get to exploring them! 
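# The EDA and text-processing cells below use plotly (px, go, py.iplot) and NLTK
# (word_tokenize, stopwords), and the cell above calls init_notebook_mode, yet none of these
# names are imported explicitly in this notebook. The imports below are an assumed fix based on
# the standard packages these names usually come from.
import plotly.express as px
import plotly.graph_objs as go
import plotly.offline as py
from plotly.offline import init_notebook_mode
import nltk
from nltk.corpus import stopwords

# In a fresh environment the NLTK corpora may also need a one-time download:
# nltk.download("stopwords"); nltk.download("punkt")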
# # Data Preprocessing def missing_values_table(df): # Total missing values mis_val = df.isnull().sum() # Percentage of missing values mis_val_percent = 100 * df.isnull().sum() / len(df) # Make a table with the results mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1) # Rename the columns mis_val_table_columns = mis_val_table.rename( columns={0: "Missing Values", 1: "% of Total Values"} ) # Sort the table by percentage of missing descending # .iloc[:, 1]!= 0: filter on missing missing values not equal to zero mis_val_table_columns = ( mis_val_table_columns[mis_val_table_columns.iloc[:, 1] != 0] .sort_values("% of Total Values", ascending=False) .round(2) ) # round(2), keep 2 digits # Print some summary information print( "Dataset has {} columns.".format(df.shape[1]) + "\n" + "There are {} columns that have missing values.".format( mis_val_table_columns.shape[0] ) ) # Return the dataframe with missing information return mis_val_table_columns missing_values_table(df) df = df.drop( ["Recruitment Contact", "Hours/Shift", "Post Until", "Work Location 1"], axis=1 ) # As we see from the above step that Recruitment Contact, Hours/Shift, Post Until, Work Location 1has more than 50% null values, so it's abvious to drop these columns df = df.drop(["Additional Information"], axis=1) # Even 'Additional Information' is not relevant to our requirement, so it has to be removed missing_values_table(df) for column in [ "Job Category", "Residency Requirement", "Posting Date", "Posting Updated", "Process Date", "To Apply", ]: df[column] = df[column].fillna(df[column].mode()[0]) # Replacing null values of few variables which has less than 0.1% of null values with mode of respective features # # Exploratory Data Analysis # ### Highest High Salary Range high_sal_range = ( df.groupby("Civil Service Title")["Salary Range To"].mean().nlargest(10) ).reset_index() fig = px.bar( high_sal_range, y="Civil Service Title", x="Salary Range To", orientation="h", title="Highest High Salary Range", color="Salary Range To", color_continuous_scale=px.colors.qualitative.G10, ).update_yaxes(categoryorder="total ascending") fig.show() # Oh. It seems that **Senior General Deputy Manager**, in general, has the highest avergae salary range, ranging upto $230,000 per year! # Now that's an impressive amount. # Most of the openigns in the top ten highest salary seem to be from executive fields, or higher posts. These are the fields which rake in most of the money, on average, paving way for the high salaries people seem to hear about! 
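# As a quick complement to the plot above (a sketch reusing the same grouping), the lower bound
# of the salary range for the top titles can be pulled in exactly the same way via
# "Salary Range From":
low_sal_range = (
    df.groupby("Civil Service Title")["Salary Range From"].mean().nlargest(10)
).reset_index()
print(low_sal_range)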
popular_categories = df["Job Category"].value_counts()[:5] popular_categories # ### Top 10 Job Openings via Category job_categorydf = ( df["Job Category"] .value_counts(sort=True, ascending=False)[:10] .rename_axis("Job Category") .reset_index(name="Counts") ) job_categorydf = job_categorydf.sort_values("Counts") trace = go.Scatter( y=job_categorydf["Job Category"], x=job_categorydf["Counts"], mode="markers", marker=dict( size=job_categorydf["Counts"].values / 2, color=job_categorydf["Counts"].values, colorscale="Viridis", showscale=True, colorbar=dict(title="Opening Counts"), ), text=job_categorydf["Counts"].values, ) data = [(trace)] layout = go.Layout( autosize=False, width=1000, height=750, title="Top 10 Job Openings Count", hovermode="closest", xaxis=dict(showgrid=False, zeroline=False, showline=False), yaxis=dict( title="Job Openings Count", ticklen=2, gridwidth=5, showgrid=False, zeroline=True, showline=False, ), showlegend=False, ) fig = go.Figure(data=data, layout=layout) py.iplot(fig) # # Feature Engineering num_cols = df._get_numeric_data().columns num_cols cat_cols = list(set(df.columns) - set(num_cols)) today = pd.datetime.today() redudant_cols = [ "Job ID", "# Of Positions", "Posting Updated", "Minimum Qual Requirements", "To Apply", "Business Title", "Level", ] df[cat_cols] # Based on the business problem given in the problem statement, it can be said that personal information(Posting date,process date,resident details) will be of no use for our employee segregeration df = df.drop(redudant_cols, axis=1) df # ### Data Cleaning and Transformation def parse_categories(x): l = x.replace("&", ",").split(",") l = [x.strip().rstrip(",") for x in l] key_categories.extend(l) def parse_keywords(x, l): x = x.lower() tokens = nltk.word_tokenize(x) stop_words = set(stopwords.words("english")) token_l = [w for w in tokens if not w in stop_words and w.isalpha()] l.extend(token_l) def preferred_skills(x): kwl = [] df[df["Job Category"] == x]["Preferred Skills"].dropna().apply( parse_keywords, l=kwl ) kwl = pd.Series(kwl) return kwl.value_counts()[:20] key_categories = [] df["Job Category"].dropna().apply(parse_categories) key_categories = pd.Series(key_categories) key_categories = key_categories[key_categories != ""] popular_categories = key_categories.value_counts().iloc[:25] key_categories df["cat"] = key_categories plt.figure(figsize=(10, 10)) sns.countplot(y=key_categories, order=popular_categories.index, palette="YlGn") salary_table = df[["Civil Service Title", "Salary Range From", "Salary Range To"]] jobs_highest_high_range = pd.DataFrame( salary_table.groupby(["Civil Service Title"])["Salary Range To"].mean().nlargest(10) ).reset_index() plt.figure(figsize=(8, 6)) sns.barplot( y="Civil Service Title", x="Salary Range To", data=jobs_highest_high_range, palette="Greys", ) def plot_wordcloud(text): wordcloud = WordCloud(background_color="white", width=1024, height=720).generate( text ) plt.clf() plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() job_description_keywords = [] df["Job Description"].apply(parse_keywords, l=job_description_keywords) plt.figure(figsize=(10, 8)) counter = Counter(job_description_keywords) common = [x[0] for x in counter.most_common(40)] plot_wordcloud(" ".join(common)) # From the above wordcloud, it can be seen that work, city, project, water, new are most frequently used words in the Job description, whereas staff system,management, planning, design, support e.t.c are required skills which are demanded mostly by the employer words = 
[] counts = [] for letter, count in counter.most_common(10): words.append(letter) counts.append(count) import matplotlib.cm as cm from matplotlib import rcParams colors = cm.rainbow(np.linspace(0, 1, 10)) rcParams["figure.figsize"] = 20, 10 plt.title("Top words in the Job description vs their count") plt.xlabel("Count") plt.ylabel("Words") plt.barh(words, counts, color=colors) # So, here we can remove the words which doesn't necessarily depict any information related to skills df["Posting Date"] = pd.to_datetime(df["Posting Date"]) df["Process Date"] = pd.to_datetime(df["Process Date"]) # As there is no column for years of exprience, so we can assume that process date is the date when either latest or new posting has been published by the employer df["years of exprience"] = df["Process Date"] - df["Posting Date"] df["years of exprience"] = df["years of exprience"].dt.days df_cluster = df[["cat", "Salary Range To", "years of exprience"]] df_cluster.isna().sum() df_cluster["cat"].value_counts() df_cluster["cat"].fillna("Others", inplace=True) df_cluster = df_cluster.replace("\*", "", regex=True) df_cluster # we are creating new dataframe with job category, maximum salary for the respective role and years of exprience. Reason of taking max salary instead of mean salary is to categorize those set of job which demands niche skills and higher salary # Calculating the Hopkins statistic from sklearn.neighbors import NearestNeighbors from random import sample from numpy.random import uniform import numpy as np from math import isnan def hopkins(X): d = X.shape[1] # d = len(vars) # columns n = len(X) # rows m = int(0.1 * n) nbrs = NearestNeighbors(n_neighbors=1).fit(X.values) rand_X = sample(range(0, n, 1), m) ujd = [] wjd = [] for j in range(0, m): u_dist, _ = nbrs.kneighbors( uniform(np.amin(X, axis=0), np.amax(X, axis=0), d).reshape(1, -1), 2, return_distance=True, ) ujd.append(u_dist[0][1]) w_dist, _ = nbrs.kneighbors( X.iloc[rand_X[j]].values.reshape(1, -1), 2, return_distance=True ) wjd.append(w_dist[0][1]) H = sum(ujd) / (sum(ujd) + sum(wjd)) if isnan(H): print(ujd, wjd) H = 0 return H # Let's check the Hopkins measure hopkin_df = df_cluster hopkins(hopkin_df.drop(["cat"], axis=1)) # 0.99 is a good Hopkins score. Hence the data is very much suitable for clustering. Preliminary check is now done. # We can do standardisation again or else we can skip this step as well. df_cluster_std = df_cluster X_C = df_cluster_std.drop(["cat"], axis=1) df_cluster_std = StandardScaler().fit_transform(X_C) df_cluster # # K-means Clustering # Let's check the silhouette score first to identify the ideal number of clusters # To perform KMeans clustering from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score sse_ = [] for k in range(2, 10): kmeans = KMeans(n_clusters=k).fit(df_cluster_std) sse_.append([k, silhouette_score(df_cluster_std, kmeans.labels_)]) plt.plot(pd.DataFrame(sse_)[0], pd.DataFrame(sse_)[1]) # The sihouette score reaches a peak at around 4 clusters indicating that it might be the ideal number of clusters. # The sihouette score reaches a peak at around 4 clusters indicating that it might be the ideal number of clusters. # Let's use the elbow curve method to identify the ideal number of clusters. ssd = [] for num_clusters in list(range(1, 10)): model_clus = KMeans(n_clusters=num_clusters, max_iter=50) model_clus.fit(df_cluster_std) ssd.append(model_clus.inertia_) plt.plot(ssd) # A distinct elbow is formed at around 2-5 clusters. 
# Let's finally create the clusters and see for ourselves which ones fare better
# K-means with k=4 clusters
model_clus4 = KMeans(n_clusters=4, max_iter=50)
model_clus4.fit(df_cluster_std)
dat4 = df_cluster
dat4.index = pd.RangeIndex(len(dat4.index))
dat_km = pd.concat([dat4, pd.Series(model_clus4.labels_)], axis=1)
dat_km.columns = ["cat", "salary_max", "exp", "ClusterID"]
dat_km
dat_km["ClusterID"].value_counts()
dat_km
# One thing we notice is that fairly distinct clusters are formed, although cluster 1 holds far more data points than the others
# Now let's look at the cluster means with respect to the variables mentioned in the problem statement, plot them, and see how they are related
df_final = pd.merge(df, dat_km, on="cat")
df_final
df_final.info()
# Along Job category and years of experience
sns.scatterplot(x="cat", y="exp", hue="ClusterID", data=df_final)
# Along maximum salary and years of experience
sns_plot = sns.scatterplot(x="Salary Range To", y="exp", hue="cat", data=df_final)
# From the above plot, we can see how the salary ranges differ by job category (cat) and years of experience (exp).
fig = sns_plot.get_figure()
fig.savefig("output.png")
# As there are many job categories, the x-axis of the first plot is hard to read, but we can get a clearer picture below
# Let's take a look at those Job category clusters and try to make sense of whether the clustering process worked well.
df_final_on_jobcat = df_final[df_final["ClusterID"] == 1]
df_final_on_jobcat["cat"].value_counts()
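# A compact cluster profile (a sketch using the dat_km frame built above): averaging the two
# numeric features per ClusterID summarises how the four clusters differ, complementing the
# scatter plots.
cluster_profile = dat_km.groupby("ClusterID")[["salary_max", "exp"]].mean().round(2)
print(cluster_profile)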
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. data = pd.read_csv( "/kaggle/input/gtd/globalterrorismdb_0718dist.csv", encoding="ISO-8859-1" ) # drop all NaN columns # df = data.dropna(axis=1) # for look at all raws and cols # pd.set_option("display.max.columns", None) # pd.set_option("display.max.rows", None) # for 4 number after , # pd.set_option("display.precision", 4) data.rename( columns={ "iyear": "Year", "imonth": "Month", "iday": "Day", "country_txt": "Country", "region_txt": "Region", "attacktype1_txt": "AttackType", "target1": "Target", "nkill": "Killed", "nwound": "Wounded", "summary": "Summary", "gname": "Group", "targtype1_txt": "Target_type", "weaptype1_txt": "Weapon_type", "motive": "Motive", }, inplace=True, ) df = data[ [ "Year", "Month", "Day", "Country", "Region", "city", "latitude", "longitude", "AttackType", "Killed", "Wounded", "Target", "Summary", "Group", "Target_type", "Weapon_type", "Motive", ] ] df.info() df.columns df.tail() df.corr() # correlation map sns.set(font_scale=1.5) f, ax = plt.subplots(figsize=(15, 15)) sns.heatmap(df.corr(), annot=True, linewidths=0.5, fmt=".1f", ax=ax) plt.show() # Line Plot df.Year.plot( kind="line", color="g", label="Year", linewidth=1, alpha=0.5, grid=True, linestyle="-.", figsize=(12, 12), ) plt.legend(loc="upper right") plt.xlabel("Terror") plt.ylabel("Year") plt.title("Terror-Year Line Plot") plt.show() # Scatter Plot plt.subplots(figsize=(12, 12)) sns.scatterplot(data=df, x="Year", y="Killed") plt.show() # Histogram Plot df.Year.plot(kind="hist", bins=100, figsize=(20, 12)) plt.title("Country Histogram Plot") plt.show() # Histogram plt.subplots(figsize=(12, 12)) sns.countplot( "Region", data=df, palette="RdYlGn_r", edgecolor=sns.color_palette("dark", 7), order=df["Region"].value_counts().index, ) plt.xticks(rotation=90) plt.title("Number Of Terrorist Activities Each Region") plt.show() terrorInTurkey = df[(df["Country"] == "Turkey")] terrorInTurkey # Histogram plt.subplots(figsize=(35, 10)) sns.countplot( "Group", data=terrorInTurkey, palette="RdYlGn_r", edgecolor=sns.color_palette("dark", 7), ) plt.xticks(rotation=90) plt.title("Number Of Terror Group Activities in Turkey") plt.show() terrorInIstanbul = df[(df["city"] == "Istanbul") & (df["Year"] > 2000)] terrorInIstanbul plt.subplots(figsize=(20, 10)) sns.countplot( terrorInIstanbul["AttackType"], palette="inferno", order=terrorInIstanbul["AttackType"].value_counts().index, ) plt.xticks(rotation=90) plt.title("Number of Attack Type Activities After 2000 in Istanbul") plt.show() # DICTIONARY dic = {"Riot Games": "LoL", "Rockstar Games": "GTA", "Valve": "CSGO"} print(dic.keys()) print(dic.values()) dic["Riot Games"] = "LoR" # update entry dic["CD Projekt"] = "Witcher 3" # add new entyr print(dic) print("LoR" in dic) # check inlude or not print("Riot Games" in dic) dic.clear() # clear dictionary print(dic) # WHILE AND FOR LOOPS i = 0 while i < 10: print(i * i) i += 1 lis = [1, 2, 3, 4, 5] for index, value in enumerate(lis): print("index:", index, "value:", value) dic = {"Riot Games": "LoL", 
"Rockstar Games": "GTA", "Valve": "CSGO"} for key, value in dic.items(): print(key + ":", value) for index, value in terrorInIstanbul[["Killed"]][:].iterrows(): print(index, value)
# # GOAL # ### This notebook gives a summary of a study and explains how regression techniques can be used to predict the price of a house. # ### The features in the dataset represent different attributes of each house in the dataset. # ### It is the objective of this project to apply regression models in order to predict the values for the SalePrice feature. # ## ML Methods used: # ### **The models that I used in this task:** # **`Linear Regression`,** # **`Lasso Regression`,** # **`Ridge Regression`,** # **`Support Vector Regressor`,** # **`KNeighbours Regressor`,** # **`Random Forest Regressor`** # # Imports packages # No Data without you guys! import numpy as np import pandas as pd # Visualizations from IPython.display import display, HTML import plotly.express as px import matplotlib.pyplot as plt # Statistics from scipy.stats import skew # Metrics from sklearn.preprocessing import LabelEncoder, StandardScaler from sklearn.metrics import mean_squared_error, r2_score # Models from xgboost import XGBRegressor from sklearn.linear_model import Ridge from sklearn.linear_model import Lasso from sklearn.linear_model import LinearRegression from sklearn.svm import SVR from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsRegressor from sklearn.ensemble import RandomForestRegressor # An effective way to set the display option for all the columns in the dataframe pd.set_option("display.max_columns", None) # Ignore in case of warnings import warnings warnings.filterwarnings(action="ignore") # # Load Data # Load train data and make a copy of it train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) df_train = train.copy() # Load df_test data df_test = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/test.csv" ) # Load sample_submission sample_data = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv" ) # # Checking train Dataset print("**Dataset:**") print("-------------Train-------------------\n") display(df_train.head()) print("--------------Train shape ------------------\n") print("**Dataset shape:**", df_train.shape) print("--------------Train columns ------------------\n") display(df_train.columns) print("--------------------Train description----------------------------") display(df_train.describe().T.head(10)) print("--------------------Train Types----------------------------------") display(df_train.dtypes.sort_values(ascending=False)) print("--------------------Train NA Check-------------------------------") display(df_train.isna().sum().sort_values(ascending=False).head(20)) # ### Description of checking Train data result: # * **Data contains 81 columns with 1460 samples.** # * **Data contains Object, int and float type vlues** # * **Data contains null values which the most are in "PoolQC" (Need to be check for the ratio of null values in case of how to handle the null values)** # ### Checking the ratio of Null values print("--------------------Null values ratio-----------------------\n") display(df_train.isnull().mean() * 100) print("--------------------Plotting the ratio of null values-----------------\n") (df_train.isnull().mean() * 100).plot.bar(figsize=(12, 6)) plt.ylabel("Percentage of missing values %") plt.xlabel("Variables") plt.title("Quantifying missing data") df_train.columns[df_train.isnull().mean() * 100 > 20] df_train = df_train.drop( columns=["Alley", "FireplaceQu", "PoolQC", "Fence", "MiscFeature"], axis=1 ) display(df_train) for 
column in df_train.columns: if df_train[column].dtype == np.object: df_train[column].fillna(df_train[column].mode()[0], inplace=True) elif df_train[column].dtype == np.float64: df_train[column].fillna(df_train[column].median(), inplace=True) elif df_train[column].dtype == np.int64: df_train[column].fillna(df_train[column].median(), inplace=True) print("---------------Checking for the null value again-----------------\n") display(df_train.isnull().sum()) print("The totall amount of null values:", df_train.isnull().sum().sum()) print("---------------------The final train data set-------------------\n") display(df_train) # ### Description of checking ratio of null values and the handling ways result: # **It's best to aim for as few null values as possible, as they can affect the accuracy and reliability of analysis. # However, there is no hard and fast rule for what ratio of null values is acceptable.** # **As a general guideline, if the percentage of null values in the dataset is less than 5%, we can consider imputing the missing values using methods such as mean or median imputation.** # **If the percentage is greater than 5%, we may want to consider other methods such as dropping rows or columns with null values** # **Since the amount of data is 1460 and based on the ratio of the null values so I decieded to drop 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'.** # **For the rest of columns that contain null values, I take the median of columns contains int and float types and for the object types, I take mode of the columns.** # # Exploratory data analysis (EDA) and Feature Engineering # ### Checking the Skew and Normal distribution # **Skewness is a measure of the asymmetry of a distribution. If the distribution of a dataset is not symmetric, it is said to be skewed.** # **A dataset can be skewed in either direction, that is, it can have a positive or negative skew. The most common way to measure skewness is to calculate the skewness coefficient.** # **The skewness coefficient is a numerical measure of the degree of skewness in a dataset. It is calculated using the following formula:** # skewness = 3 * (mean - median) / standard deviation # **If the skewness coefficient is zero, the distribution is perfectly symmetrical. If the skewness coefficient is negative, the distribution is skewed to the left (negative skew), and if it is positive, the distribution is skewed to the right (positive skew).** # **In general, a skewness coefficient between -0.5 and 0.5 indicates a relatively symmetrical distribution. A skewness coefficient between -1 and -0.5 or between 0.5 and 1 indicates a moderately skewed distribution, while a skewness coefficient less than -1 or greater than 1 indicates a highly skewed distribution.** # numeric = ["int64", "float64"] for column in df_train.columns: if df_train[column].dtypes in numeric: skew = df_train[column].skew() if skew < -1 or skew > 2: print(f"The skewness of {column} is : {skew}.\n") df_train[column] = np.log1p(df_train[column]) new_skew = df_train[column].skew() print( f"After apply log transform, The skewness of {column} is: {new_skew}." 
) print("-----------------------------------------\n") else: continue # ### Description of checking skewness of columns and the handling ways result: # **There are several methods for handling skewness in a dataset:** # **Square root transformation** # **Box-Cox transformation** # **Winsorization** # **Robust statistics** # **Log transformation: One common method for reducing skewness is to apply a logarithmic transformation to the data. This can help to spread out the values at the high end of the distribution and compress the values at the low end, resulting in a more symmetrical distribution.** # **After applying the log transformation the skewness gets much more better. In general, it's a good idea to explore the distribution of the data visually using histograms or other plots to determine the level of skewness and the most appropriate method for handling it.** # **For plotting the histogram of columns,I used one of the Contributor code plotly which I found it interesting. Thanks OLEKSII ZHUKOV** def histo_plot(data, column_name_str): fig = px.histogram( data, x=column_name_str, template="simple_white", color_discrete_sequence=["red"], ) fig.update_layout( xaxis_title=column_name_str, yaxis_title="Frequency", showlegend=True, font=dict(size=14), title={"text": "Distribution", "y": 0.95, "x": 0.5}, ) # Display fig.show() print("----------------Skewness value-----------------\n") print(data[column_name_str].skew()) # **df_test the distribution of PoolArea and LotArea in Train data set and the target SalePrice** histo_plot(df_train, "SalePrice") histo_plot(df_train, "PoolArea") histo_plot(df_train, "LotArea") # **The most values in PoolArea is zero and after log transform still not normally distributed so I drop it.** df_train.drop(["PoolArea"], axis=1, inplace=True) # **Till here I handled the numerical value but what about object and category data! Lets check the object values in train data** object_features = [] for column_name in df_train.columns: if df_train[column_name].dtype == "object": object_features.append(column_name) print( f"----------------Object type features name ----------> number of object features:{len(object_features)} <----------\n " ) display(object_features) # **it's important to convert object values (i.e. strings or categorical variables) into a numerical format that can be used as input to a machine learning algorithm. There are several methods for doing this:** # **One-hot encoding** # **Binary encoding** # **Hashing encoding** # **Label encoding: This method involves assigning a unique numerical value to each category in a categorical variable.** # ## Label Encoding label_encode = LabelEncoder() for column in df_train.columns: if df_train[column].dtype == "object": df_train[column] = label_encode.fit_transform(df_train[column]) df_train.head() # ## Let's see the relation between SalePrice our target and some of the features in the dataset: # **it's important to check for linear relationships between the target variable and the other variables in the dataset. This is because many machine learning algorithms, such as linear regression, assume that there is a linear relationship between the target variable and the input variables. 
If this assumption is violated, the algorithm may not perform well and may produce inaccurate predictions.** # **If there is a linear relationship between the target variable and the input variables, it may be possible to improve the performance of the machine learning algorithm by including additional features that capture non-linear relationships or by transforming the input variables to create a more linear relationship.** # **If there is no linear relationship between the target variable and the input variables, it may be necessary to use a non-linear machine learning algorithm or to engineer new features that better capture the underlying relationship between the variables.** selected_columns = [ "LotFrontage", "LotArea", "YearBuilt", "MasVnrArea", "GrLivArea", "GarageCars", "GarageArea", "OverallQual", "TotalBsmtSF", ] for column_name in selected_columns: fig = px.scatter( df_train, x="SalePrice", y=column_name, opacity=0.65, trendline="ols", trendline_color_override="darkblue", ) fig.show() # ### Let's do some Feature Engineering! I think we need it :) # * **I remove unnecessary columns Based on what I know ! :)** df_train.drop(["Id", "Street", "YearRemodAdd", "MiscVal"], axis=1, inplace=True) # * **Change the type of the 'MSSubClass', 'OverallCond', 'YrSold' and 'MoSold' because these are contains numerical values but these numbers are has meaning by their numeric values (Check the data_description.txt)** df_train["MSSubClass"] = df_train["MSSubClass"].apply(str) df_train["OverallCond"] = df_train["OverallCond"].astype(str) df_train["YrSold"] = df_train["YrSold"].astype(str) df_train["MoSold"] = df_train["MoSold"].astype(str) # **Lets go to the df_test dataset** # # Same Steps for the df_test Dataset print("**Dataset:**") print("-------------df_test-------------------\n") display(df_test.head()) print("--------------df_test shape ------------------\n") print("**Dataset shape:**", df_test.shape) print("--------------df_test columns ------------------\n") display(df_test.columns) print("--------------------df_test description----------------------------") display(df_test.describe().T.head(10)) print("--------------------df_test Types----------------------------------") display(df_test.dtypes.sort_values(ascending=False)) print("--------------------df_test NA Check-------------------------------") display(df_test.isna().sum().sort_values(ascending=False).head(20)) print("--------------------Null values ratio-----------------------\n") display(df_test.isnull().mean() * 100) print("--------------------Plotting the ratio of null values-----------------\n") (df_test.isnull().mean() * 100).plot.bar(figsize=(12, 6)) plt.ylabel("Percentage of missing values %") plt.xlabel("Variables") plt.title("Quantifying missing data") df_test.columns[df_test.isnull().mean() * 100 > 20] df_test = df_test.drop( columns=["Alley", "FireplaceQu", "PoolQC", "Fence", "MiscFeature"], axis=1 ) display(df_test) for column in df_test.columns: if df_test[column].dtype == np.object: df_test[column].fillna(df_test[column].mode()[0], inplace=True) elif df_test[column].dtype == np.float64: df_test[column].fillna(df_test[column].median(), inplace=True) elif df_test[column].dtype == np.int64: df_test[column].fillna(df_test[column].median(), inplace=True) print("---------------Checking for the null value again-----------------\n") display(df_test.isnull().sum()) print("The totall amount of null values:", df_test.isnull().sum().sum()) print("---------------------The final train data set-------------------\n") 
display(df_test) numeric = ["int64", "float64"] for column in df_test.columns: if df_test[column].dtypes in numeric: skew = df_test[column].skew() if skew < -1 or skew > 1: print(f"The skewness of {column} is : {skew}.\n") df_test[column] = np.log1p(df_test[column]) new_skew = df_test[column].skew() print( f"After apply log transform, The skewness of {column} is: {new_skew}." ) print("-----------------------------------------\n") else: continue def histo_plot(data, column_name_str): fig = px.histogram( data, x=column_name_str, template="simple_white", color_discrete_sequence=["red"], ) fig.update_layout( xaxis_title=column_name_str, yaxis_title="Frequency", showlegend=True, font=dict(size=14), title={"text": "Distribution", "y": 0.95, "x": 0.5}, ) # Display fig.show() print("----------------Skewness value-----------------\n") print(data[column_name_str].skew()) histo_plot(df_test, "PoolArea") histo_plot(df_test, "LotArea") df_test.drop(["PoolArea"], axis=1, inplace=True) object_features = [] for column_name in df_test.columns: if df_test[column_name].dtype == "object": object_features.append(column_name) print( f"----------------Object type features name ----------> number of object features:{len(object_features)} <----------\n " ) display(object_features) label_encode = LabelEncoder() for column in df_test.columns: if df_test[column].dtype == "object": df_test[column] = label_encode.fit_transform(df_test[column]) df_test.head() df_test.drop(["Street", "YearRemodAdd", "MiscVal"], axis=1, inplace=True) df_test["MSSubClass"] = df_test["MSSubClass"].apply(str) df_test["OverallCond"] = df_test["OverallCond"].astype(str) df_test["YrSold"] = df_test["YrSold"].astype(str) df_test["MoSold"] = df_test["MoSold"].astype(str) # # Regression Models # ### **The models that I used in this task `Linear Regression`,`Lasso Regression`,`Ridge Regression`,`Support Vector Regressor`,`KNeighbours Regressor` and `Random Forest Regressor`** # **Let's make our evaluation metrics function that we want to know from our model** def evaluation(y_actual, y_predicted): mse = mean_squared_error(y_actual, y_predicted) rmse = np.sqrt(mse) accuracy = r2_score(y_actual, y_predicted) return mse, rmse, accuracy # **Define X and Y and X_test from test dataset (I removed the Id before :))** X = df_train.drop(columns=["SalePrice"]) Y = df_train["SalePrice"] X_df_test = df_test # **Split data into train and test with the test size of 30%** X_train, X_test, Y_train, Y_test = train_test_split( X, Y, random_state=42, test_size=0.3 ) # ## Linear Regression lr = LinearRegression() lr.fit(X_train, Y_train) y_pred_lr = lr.predict(X_test) mse_lr, rmse_lr, accuracy_lr = evaluation(Y_test, y_pred_lr) # ## Lasso Regression # lasso = Lasso() lasso.fit(X_train, Y_train) y_pred_lasso = lasso.predict(X_test) mse_lasso, rmse_lasso, accuracy_lasso = evaluation(Y_test, y_pred_lasso) # ## Ridge Regression ridge = Ridge() ridge.fit(X_train, Y_train) y_pred_ridge = ridge.predict(X_test) mse_ridge, rmse_ridge, accuracy_ridge = evaluation(Y_test, y_pred_ridge) # ## Support Vector Regressor # svr = SVR() svr.fit(X_train, Y_train) y_pred_svr = svr.predict(X_test) mse_svr, rmse_svr, accuracy_svr = evaluation(Y_test, y_pred_svr) # ## KNeighbours Regressor knr = KNeighborsRegressor() knr.fit(X_train, Y_train) y_pred_knr = knr.predict(X_test) mse_knr, rmse_knr, accuracy_knr = evaluation(Y_test, y_pred_knr) # ## Random Forest Regressor rfr = RandomForestRegressor( n_estimators=7000, max_depth=15, min_samples_split=5, min_samples_leaf=5, max_features=None, 
oob_score=True, random_state=42, ) rfr.fit(X_train, Y_train) y_pred_rfr = rfr.predict(X_test) mse_rfr, rmse_rfr, accuracy_rfr = evaluation(Y_test, y_pred_rfr) models = [ "Linear Regression", "Lasso Regression", "Ridge Regression", "Support Vector Regressor", "KNeighbours Regressor", "Random Forest Regressor", ] rmse_scores = [rmse_lr, rmse_lasso, rmse_ridge, rmse_svr, rmse_knr, rmse_rfr] mse_scores = [mse_lr, mse_lasso, mse_ridge, mse_svr, mse_knr, mse_rfr] accuracy_scores = [ accuracy_lr, accuracy_lasso, accuracy_ridge, accuracy_svr, accuracy_knr, accuracy_rfr, ] model_scores = pd.DataFrame( { "Regression Models": models, "RMSE Scores": rmse_scores, "MSE_Scores": mse_scores, "Accuracy_Scores": accuracy_scores, } ) results = model_scores.sort_values("RMSE Scores", ascending=True, ignore_index=True) results # # Submission prediction = rfr.predict(df_test.drop(columns=["Id"], axis=1)) ids = df_test["Id"] Final_sub = pd.DataFrame({"Id": ids, "SalePrice": prediction}) Final_sub.to_csv("final_submission.csv", index=False) Final_sub
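# The comparison table above ranks the models on a single 70/30 split, which can be noisy.
# A k-fold cross-validation (a sketch, reusing the same X and Y defined above) gives a more
# stable ranking; RMSE is recovered from sklearn's negative-MSE scorer.
from sklearn.model_selection import cross_val_score

for name, model in [
    ("Linear Regression", LinearRegression()),
    ("Ridge Regression", Ridge()),
    ("Lasso Regression", Lasso()),
]:
    neg_mse = cross_val_score(model, X, Y, cv=5, scoring="neg_mean_squared_error")
    print("{}: mean CV RMSE = {:.4f}".format(name, np.sqrt(-neg_mse).mean()))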
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas_profiling  # Profiling is a process that helps us understand the data
import plotly.offline  # importing plotly in offline mode
import cufflinks as cf  # importing cufflinks in offline mode

cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
import pprint

#
from IPython.core.interactiveshell import (
    InteractiveShell,
)  # Printing all the outputs of a cell

InteractiveShell.ast_node_interactivity = "all"
# import pdb #python debugger
# pdb.pm()
gender_submission = pd.read_csv("../input/titanic/gender_submission.csv")
test_df = pd.read_csv("../input/titanic/test.csv")
train_df = pd.read_csv("../input/titanic/train.csv")
train_df.profile_report()
train_df.iplot()
pprint.pprint(test_df.head(5), width=50)
# # Tip: Use blue boxes (alert-info) for tips and notes.
# If it’s a note, you don’t have to include the word “Note”.
# Example: Yellow boxes are generally used to include additional examples or mathematical formulas.
# Use a green box only when necessary, e.g. to display links to related content.
# It is good to avoid red boxes, but they can be used to alert users not to delete some important part of the code, etc.
#
test_df.head(5)
train_df.head(5)
# # drop unnecessary columns, these columns won't be useful in analysis and prediction
titanic_df = train_df.drop(["PassengerId", "Name", "Ticket"], axis=1)
test_df = test_df.drop(["Name", "Ticket"], axis=1)
# # Embarked
# only in titanic_df, fill the two missing values with the most occurred value, which is "S".
titanic_df["Embarked"] = titanic_df["Embarked"].fillna("S")
# create dummy variables for the Embarked feature
dummies_embark_titanic = pd.get_dummies(titanic_df["Embarked"])
dummies_embark_test = pd.get_dummies(test_df["Embarked"])
# merge the dummies
titanic_df = titanic_df.join(dummies_embark_titanic)
test_df = test_df.join(dummies_embark_test)
# # Feature: Fare
# there is a missing "Fare" value in test_df
test_df["Fare"].fillna(test_df["Fare"].median(), inplace=True)
# convert from float to int
titanic_df["Fare"] = titanic_df["Fare"].astype(int)
test_df["Fare"] = test_df["Fare"].astype(int)
# # Feature: Age
# # Feature: Cabin
# It has a lot of NaN values, hence dropping it won't have a remarkable impact on prediction
titanic_df.drop("Cabin", axis=1, inplace=True)
test_df.drop("Cabin", axis=1, inplace=True)
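# The "Feature: Age" section above is left empty. A common baseline (a sketch only, not
# necessarily what was originally intended) is median imputation plus a cast to int, mirroring
# how Fare is handled above:
titanic_df["Age"].fillna(titanic_df["Age"].median(), inplace=True)
test_df["Age"].fillna(test_df["Age"].median(), inplace=True)
titanic_df["Age"] = titanic_df["Age"].astype(int)
test_df["Age"] = test_df["Age"].astype(int)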
import pandas as pd import numpy as np # # Reading the Movie Rating dataset # Reading the file: movie_dataset = pd.read_csv("../input/movie_ratings.csv") # Dropping user name column: movie_ratings = movie_dataset.drop("users", axis=1) print(movie_ratings.head(5)) print(" Shape of movie matrix is :", movie_ratings.shape) # # Content based Matrix factorization using regularization: # Function for matrix factorization: def matrix_factorization(R, P, Q, K, steps=10000, alpha=0.0002, beta=0.02): Q = Q.T for step in range(steps): for i in range(len(R)): for j in range(len(R[i])): if R[i][j] > 0: eij = R[i][j] - np.dot(P[i, :], Q[:, j]) for k in range(K): P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k]) Q[k][j] = Q[k][j] + alpha * (2 * eij * P[i][k] - beta * Q[k][j]) eR = np.dot(P, Q) e = 0 for i in range(len(R)): for j in range(len(R[i])): if R[i][j] > 0: e = e + pow(R[i][j] - np.dot(P[i, :], Q[:, j]), 2) for k in range(K): e = e + (beta / 2) * (pow(P[i][k], 2) + pow(Q[k][j], 2)) # print(e) if e < 100: break return P, Q.T, e # # Defining all the parameters and calling the function: # Defining all the parameters: R = np.array(movie_ratings) N = len(R) M = len(R[0]) K = 3 P = np.random.rand(N, K) # print(P.shape) Q = np.random.rand(M, K) # print(Q.shape) # Calling the function: nP, nQ, e = matrix_factorization(R, P, Q, K) print("Breakout Error form the function is", e) # Combining it again as a full matrix nR = np.dot(nP, nQ.T) print(nR) # # Comparing matrices and inserting predicted values in the original matrix : # Replacing all the movie rating by -1 that are already watched by users: def eliminating_watched_movies(R): for i in range(len(R)): for j in range(len(R[i])): if not np.isnan(R[i][j]): R[i][j] = -1 return R # Adding predictions to this matrix: def Adding_predictions(R, nR): # First calling the function eliminating_watched_movies: eliminating_watched_movies(R) for i in range(len(R)): for j in range(len(R[i])): if np.isnan(R[i][j]): R[i][j] = nR[i][j] return R # # Preparing the final matrix: # Final Matrix of ratings to use for recommendation: Adding_predictions(R, nR) # # Adding column name and indexes:: # Defing a list ranging 1-50 to add 50 users: mylist = list(range(1, 51)) print(mylist) # Add indexes and column name: Ratings = pd.DataFrame( R, columns=[ "movie1", "movie2", "movie3", "movie4", "movie5", "movie6", "movie7", "movie8", "movie9", "movie10", ], index=mylist, ) print(Ratings) # # Sorting the ratings: # For each user recommendation ratings are sorted: def sorted_ratings(): for i in range(len(mylist)): a = Ratings.iloc[i] print(a.sort_values(ascending=False)) sorted_ratings() # We can pick top 2 or top 3 movies to send recommendation to users. # # Top 2 recommended movie for a specific user: def top_2_recommendations_for_user(user_id): # Using loc to get index based values: a = Ratings.loc[user_id].sort_values(ascending=False) if a[0] > 0 and a[1] > 0: print(a[0:2]) if a[0] > 0 and a[1] < 0: print(a[0:1]) if a[0] < 0: print("No Recommendation") top_2_recommendations_for_user(3) # There is no user who has watched all the movies # Try only one recommendation with movie 48 # Try two recommendation with movie 1
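# For reference, the update rule implemented in matrix_factorization above is plain stochastic
# gradient descent on the regularized squared error. For each observed rating R[i][j]:
#   e_ij = R[i][j] - P[i, :] . Q[:, j]
#   P[i][k] <- P[i][k] + alpha * (2 * e_ij * Q[k][j] - beta * P[i][k])
#   Q[k][j] <- Q[k][j] + alpha * (2 * e_ij * P[i][k] - beta * Q[k][j])
# The monitored loss is the sum over observed cells of (R[i][j] - P[i, :] . Q[:, j])^2 plus a
# (beta / 2) * (P**2 + Q**2) regularization term, with early stopping once it drops below 100.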
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns train = pd.read_csv("/kaggle/input/data-science-london-scikit-learn/train.csv") test = pd.read_csv("/kaggle/input/data-science-london-scikit-learn/test.csv") train_y = pd.read_csv("/kaggle/input/data-science-london-scikit-learn/trainLabels.csv") train.head() test.head() sorted(train.columns)[:5] sorted(test.columns)[:5] train_y.head() train_y_cols = np.array(train_y.columns) train_y_cols = train_y_cols.reshape(1, train_y_cols.shape[0]) pd.DataFrame(train_y_cols, columns=["1"]) train_y_with_column = pd.concat( [train_y, pd.DataFrame(train_y_cols, columns=["1"])], axis=0, ignore_index=True ) train_y_with_column.head() train_y_with_column.rename(columns={1: "y"}, inplace=True) train_y_with_column.info() train_y_with_column = train_y_with_column.astype(float) y = train_y_with_column.values y.shape train.shape test.shape train_cols = np.array(train.columns) test_cols = np.array(test.columns) train_cols = train_cols.reshape(1, train_cols.shape[0]) test_cols = test_cols.reshape(1, test_cols.shape[0]) pd.DataFrame(train_cols) pd.DataFrame(test_cols) new_columns_train = dict() new_columns_test = dict() cols_train = list(train.columns) cols_test = list(test.columns) for i in range(train.shape[1]): new_columns_train[cols_train[i]] = i new_columns_test[cols_test[i]] = i train.rename(columns=new_columns_train, inplace=True) test.rename(columns=new_columns_test, inplace=True) train_with_column = pd.concat( [train, pd.DataFrame(train_cols)], axis=0, ignore_index=True ) test_with_column = pd.concat([test, pd.DataFrame(test_cols)], axis=0, ignore_index=True) train_with_column.tail() test_with_column.tail() train_test = pd.concat([train_with_column, test_with_column], axis=0, ignore_index=True) train_with_column.shape test_with_column.shape train_test.shape # 1000 train, 9000 test train_test.head() train_test.isna().sum() train_test.info() train_test = train_test.astype(float) train_test.corr() X = train_with_column.values y = train_y_with_column.values X_submission = test_with_column.values print("X.shape: {}".format(X.shape)) print("y.shape: {}".format(y.shape)) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) import xgboost as xgb xgb_model = xgb.XGBClassifier() xgb_model.fit(X_train, y_train) xgb_model.score(X_test, y_test) xgb_pred = xgb_model.predict(X_submission) xgb_pred = pd.DataFrame(xgb_pred, columns=["Solution"]) xgb_pred = xgb_pred.reset_index() xgb_pred.rename(columns={"index": "Id"}, inplace=True) xgb_pred["Id"] = xgb_pred["Id"] + 1 xgb_pred["Solution"] = xgb_pred["Solution"].astype(int) xgb_pred.head() xgb_pred.tail() xgb_pred.to_csv("Submission.csv", index=False)
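# The concat-the-column-names-back trick above works around the fact that these CSVs appear to
# ship without a header row, so pandas consumes the first data row as column names. Assuming
# that is indeed the case, an alternative (sketch) is to read the files with header=None, which
# keeps all rows without any re-appending:
train_noheader = pd.read_csv(
    "/kaggle/input/data-science-london-scikit-learn/train.csv", header=None
)
test_noheader = pd.read_csv(
    "/kaggle/input/data-science-london-scikit-learn/test.csv", header=None
)
train_y_noheader = pd.read_csv(
    "/kaggle/input/data-science-london-scikit-learn/trainLabels.csv", header=None
)
print(train_noheader.shape, test_noheader.shape, train_y_noheader.shape)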
# ### Importing the usual suspects try: import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt from collections import Counter import seaborn as sns plt.style.use("ggplot") from nltk.corpus import stopwords from nltk.util import ngrams from nltk.stem import WordNetLemmatizer, SnowballStemmer from sklearn.feature_extraction.text import CountVectorizer stop = set(stopwords.words("english")) stop = stop.union(set(["http", "https"])) import gensim import re import string import time print("Success: Packages Loaded!") except: print("Error: One or more packages failed to load.") # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. pwd # ### Reading Data and other checks. data = pd.read_csv("train.csv") data.head() test = pd.read_csv("test.csv") print("Rows = {}, Colums = {}".format(data.shape[0], data.shape[1])) shape1 = (data.shape[0], data.shape[1]) data = data[data["text"].notnull()] if data.shape == shape1: print("Data Consistent") else: print("Data Inconsistent") sns.set_style("whitegrid") x = data.target.value_counts() sns.barplot(x.index, x) plt.gca().set_ylabel("samples") print("0: Not Disaster Tweets, 1: Disaster Tweets") def createCorpus(df, target): corpus = [] for i in df[df["target"] == target]["text"]: text = i.split() corpus.extend([w.lower() for w in text if w.lower() not in stop]) return corpus # ### Cleaning df = pd.concat([data, test], sort=False) df.shape def remove_URL(text): url = re.compile(r"https?://\S+|www\.\S+") return url.sub(r"", text) def remove_html(text): html = re.compile(r"<.*?>") return html.sub(r"", text) def remove_emoji(text): emoji_pattern = re.compile( "[" "\U0001F600-\U0001F64F" # emoticons "\U0001F300-\U0001F5FF" # symbols & pictographs "\U0001F680-\U0001F6FF" # transport & map symbols "\U0001F1E0-\U0001F1FF" # flags (iOS) "\U00002702-\U000027B0" "\U000024C2-\U0001F251" "]+", flags=re.UNICODE, ) return emoji_pattern.sub(r"", text) def remove_punct(text): table = str.maketrans("", "", string.punctuation) return text.translate(table) df["text"] = df["text"].apply(lambda x: remove_URL(x)) print("[CLEAN] Removing URLs") time.sleep(0.5) df["text"] = df["text"].apply(lambda x: remove_html(x)) print("[CLEAN] Removing HTML Tags") time.sleep(0.5) df["text"] = df["text"].apply(lambda x: remove_emoji(x)) print("[CLEAN] Removing Emoticons") time.sleep(0.5) df["text"] = df["text"].apply(lambda x: remove_punct(x)) print("[CLEAN] Removing Punctuations") time.sleep(0.5) print("Success: Text Cleaned!") def createCorpusNGrams(tweetCorpus, n=2): output = [] s = [i.lower() for i in tweetCorpus] s = [re.sub(r"[^a-zA-Z0-9\s]", " ", i) for i in s] for i in range(1, n + 1): output = list(ngrams(s, i)) return output def ngramCreation(notes, num_ngram=2): ngramList = [] if len(notes) == 0: return "" for nrange in range(1, num_ngram + 1): ngramss = ngrams(notes, nrange) for grams in ngramss: ngramList = ngramList + ["_".join(list(grams))] return ngramList def lemmatize_stemming(text): # return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v')) return WordNetLemmatizer().lemmatize(text, pos="v") def preprocess(text): result = [] for token in 
gensim.utils.simple_preprocess(text): if (token not in stop) and (len(token) >= 3): result.append(lemmatize_stemming(token)) return result try: df["text"] = df["text"].apply(preprocess) df["text"] = df["text"].apply(ngramCreation) print("Success: ngrams Created") except: print( "Error: Either operation has already been performed or it cannot be completed at the moment." ) try: tweetCorpus = createCorpus(df, 1) print("Success: Corpus Created\nTotal Words = {}".format(len(tweetCorpus))) except: print("Error: Corpus Creation Failed!") textData = list(df["text"]) dictionary = gensim.corpora.Dictionary(textData) # dictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=100000) print("Success: Dictionary Generated from df['text']") dataset = ["driving car ", "drive car carefully", "student and university"] # be sure to split each sentence before feeding it into Dictionary dataset = [d.split() for d in dataset] vocab = gensim.corpora.Dictionary(dataset) print(vocab) print(dictionary) bow_corpus = [dictionary.doc2bow(doc) for doc in textData] bow_doc_2 = bow_corpus[1] for i in range(len(bow_doc_2)): print( 'Word {} ("{}") appears {} time.'.format( bow_doc_2[i][0], dictionary[bow_doc_2[i][0]], bow_doc_2[i][1] ) ) for i in range(10): print(dictionary[i]) tfidf = gensim.models.TfidfModel(bow_corpus, smartirs="ntc")
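# A minimal sketch of how the fitted TfidfModel above can be used: re-weight one bag-of-words
# document and list its highest-scoring tokens. It assumes `tfidf`, `bow_corpus` and `dictionary`
# from the cell above are in scope; document index 1 is an arbitrary illustrative choice.
tfidf_doc = tfidf[bow_corpus[1]]  # list of (token_id, tf-idf weight) pairs
top_terms = sorted(tfidf_doc, key=lambda pair: pair[1], reverse=True)[:5]
for token_id, weight in top_terms:
    print("{:<20} {:.3f}".format(dictionary[token_id], weight))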
# ## Goal # In this notebook, we apply the Intelligent search methods like Differential Evolution Algorithm to find the best ML algorithm hyper-parameters. # Previous options are using either predetermined or randomly generated parameters for the ML algorithms. # Some of these searching methods are actually a simulation of Intelligent agents in nature like the folk of birds or school of fishes. # ![giphy.webp](attachment:giphy.webp) # * GIF Ref: https://giphy.com/gifs/c4d-human-ai-8hYQgBIIHkCPjRTmai import pandas as pd import numpy as np import csv from matplotlib import pyplot as plt from matplotlib import pyplot from sklearn.model_selection import train_test_split from xgboost import XGBClassifier from sklearn.metrics import make_scorer, accuracy_score import xgboost as xgb from sklearn.model_selection import cross_val_score # ## Searching Algorithm of your choice # Lets use DE Algorithm. # * Reference for algorithm codes: https://pablormier.github.io/2017/09/05/a-tutorial-on-differential-evolution-with-python/# # def De_Algorithm(fobj, bounds, mut=0.8, crossp=0.7, popsize=100, its=1000): dimensions = len(bounds) pop = np.random.rand(popsize, dimensions) min_b, max_b = np.asarray(bounds).T diff = np.fabs(min_b - max_b) pop_denorm = min_b + pop * diff fitness = np.asarray([fobj(ind) for ind in pop_denorm]) best_idx = np.argmin(fitness) best = pop_denorm[best_idx] for i in range(its): for j in range(popsize): idxs = [idx for idx in range(popsize) if idx != j] a, b, c = pop[np.random.choice(idxs, 3, replace=False)] mutant = np.clip(a + mut * (b - c), 0, 1) cross_points = np.random.rand(dimensions) < crossp if not np.any(cross_points): cross_points[np.random.randint(0, dimensions)] = True trial = np.where(cross_points, mutant, pop[j]) trial_denorm = min_b + trial * diff f = fobj(trial_denorm) if f < fitness[j]: fitness[j] = f pop[j] = trial if f < fitness[best_idx]: best_idx = j best = trial_denorm # print("Iteration number= %s" % (i)) # print("Best Fitness= %s" % (fitness[best_idx])) # print("Best values= %s" % (best)) yield best, fitness[best_idx] # ## ML algorithm of your choice # Let's use the most Common ML Competition algorithm which is XGBoost. def xgb2(X_training, y_training, X_valid, y_valid, w): w[1] = round(w[1]) w[2] = round(w[2]) w[6] = round(w[6]) w[7] = round(w[7]) w[8] = round(w[8]) w[9] = round(w[9]) w[10] = round(w[10]) params = { "eta": w[0], # 0.3, "tree_method": "hist", "grow_policy": "lossguide", "max_leaves": w[1], # 1400, "max_depth": w[2], # 0, "subsample": w[3], # 0.9, "colsample_bytree": w[4], # 0.7, "colsample_bylevel": w[5], # 0.7, "min_child_weight": w[6], # 0, "alpha": w[7], # 4, "objective": "binary:logistic", "scale_pos_weight": w[8], # 9, "eval_metric": "auc", "nthread": w[9], # 8, "random_state": w[10], # 99, "silent": True, } dtrain = xgb.DMatrix(X_training, y_training) dvalid = xgb.DMatrix(X_valid, y_valid) watchlist = [(dtrain, "train"), (dvalid, "valid")] model = xgb.train( params, dtrain, 100, watchlist, maximize=True, early_stopping_rounds=25, verbose_eval=0, ) # make predictions for test data X_valid = xgb.DMatrix(X_valid) y_pred = model.predict(X_valid, ntree_limit=model.best_ntree_limit) predictions = [round(value) for value in y_pred] # evaluate predictions accuracy = accuracy_score(y_valid, predictions) # print("Accuracy: %.2f%%" % (accuracy * 100.0)) return model, accuracy # ## Data-set of your choice # Let's use the TalkingData set which is available on Kaggle. 
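# Before plugging the expensive XGBoost objective in, the De_Algorithm generator defined above can
# be sanity-checked on a cheap toy objective. This is a minimal sketch: the sphere function, the
# bounds and the small popsize/its values are illustrative choices, not part of the tuning run.
def sphere(w):
    return sum(x ** 2 for x in w)


toy_history = list(De_Algorithm(sphere, bounds=[(-5, 5)] * 3, popsize=20, its=50))
best_vector, best_fitness = toy_history[-1]
print("toy best vector:", best_vector, "fitness:", best_fitness)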
# address_train = '../input/talkingdata-adtracking-fraud-detection/train_sample.csv' address_train = "../input/talkingdata-adtracking-fraud-detection/train.csv" # ### Some handy functions def Drop_cols(df, x): df.drop(labels=x, axis=1, inplace=True) return df def Plot_Hist_column(df, x): pyplot.hist(df[x], log=True) pyplot.title(x) pyplot.show() def Plot_Hist_columns(df, xlist): [Plot_Hist_column(df, x) for x in xlist] pyplot.show() def Make_X_Y(df): Y = pd.DataFrame() Y["is_attributed"] = df["is_attributed"] X = df.copy() X.drop(labels=["is_attributed"], axis=1, inplace=True) return X, Y def Train_Test_training_valid(X, Y, ratio): Num_of_line = 100 X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=ratio) X_training, X_valid, y_training, y_valid = train_test_split( X_train, y_train, test_size=ratio, random_state=0 ) return X_training, y_training, X_valid, y_valid def read_train_test_data_balanced(address_train): # Read Training data, all class 1 and add same amount 0 iter_csv = pd.read_csv( address_train, iterator=True, chunksize=10000000, parse_dates=["click_time"] ) df_train_1 = pd.concat([chunk[chunk["is_attributed"] > 0] for chunk in iter_csv]) iter_csv = pd.read_csv( address_train, iterator=True, chunksize=10000000, parse_dates=["click_time"], skiprows=range(1, 120000000), nrows=2000000, ) df_train_0 = pd.concat([chunk[chunk["is_attributed"] == 0] for chunk in iter_csv]) # seperate same number values as train data with class 1 df_train_0 = df_train_0.head(len(df_train_1)) # Merge 0 and 1 data df_train = Merge_data(df_train_1, df_train_0) return df_train def Merge_data(df1, df2): frames = [df1, df2] df = pd.concat(frames) return df # ## Preparing a dataset # read data balanced. Read all 1 values from the train data set and then add the same number of 0 and keep it. Now we do have a balance data set with an equal number of 0 and 1. df_train = read_train_test_data_balanced(address_train) df_train.head(3) # See the output paramters distribution xlist = ["is_attributed"] Plot_Hist_columns(df_train, xlist) # Lets drop click_time and attributed_time for sack of simplicity df_train.drop(labels=["click_time", "attributed_time"], axis=1, inplace=True) # Devide data set to input (X) and output (Y) paramters. X, Y = Make_X_Y(df_train) X_training, y_training, X_valid, y_valid = Train_Test_training_valid(X, Y, 0.1) # check out the ML algorithm and make sure it works. # Here you can run the XGBoost algorithm on data with your favorite hyperparameters. # w = [learning_rate, # n_estimators, # max_depth, # min_child_weight, # gamma, # subsample, # colsample_bytree, # nthread, # scale_pos_weight] """w = [0.1, 3, 3, 1, 0, 0.8, 0.8, 4, 1] Trained_XGBoost_Model, XGBoost_accuracy = Train_XGBoost(X_training, y_training, X_valid, y_valid, w)""" # check out the ML algorithm and make sure it works. # Here you can run the XGBoost algorithm on data with your favorite hyperparameters. # w = [learning_rate, # n_estimators, # max_depth, # min_child_weight, # gamma, # subsample, # colsample_bytree, # nthread, # scale_pos_weight] w = [0.1, 1400, 0, 0.9, 0.7, 0.7, 0, 4, 9, 8, 99] model2, accuracy = xgb2(X_training, y_training, X_valid, y_valid, w) """#Define an objective funtion. def Objective_Function(w): w = w.tolist() Trained_XGBoost_Model, XGBoost_accuracy = Train_XGBoost(X_training, y_training, X_valid, y_valid, w) return (1-XGBoost_accuracy)""" # Define an objective funtion. 
def Objective_Function2(w): w = w.tolist() model2, accuracy = xgb2(X_training, y_training, X_valid, y_valid, w) return 1 - accuracy """#Run the DE algorithm on objective function in your favorite range of hyperparameters. result = list(De_Algorithm(Objective_Function, [(0.001, 1), # learning_rate (3, 1000), # n_estimators (2, 20), # max_depth (1, 20), # min_child_weight (0.001, 1), # gamma (0.001, 1), # subsample (0.001, 1), # colsample_bytree (2, 8), # nthread (1, 8)], # scale_pos_weight] mut=0.4, crossp=0.8, popsize=10, its=30))""" # Run the DE algorithm on objective function in your favorite range of hyperparameters. result = list( De_Algorithm( Objective_Function2, [ (0.001, 1), # eta (3, 1500), # max_leaves (0, 20), # max_depth (0, 1), # subsample (0.001, 1), # colsample_bytree (0.001, 1), # colsample_bylevel (0.001, 1), # min_child_weight (2, 8), # alpha (1, 10), # scale_pos_weight (1, 10), # nthread (1, 10), ], # random_state mut=0.4, crossp=0.8, popsize=10, its=40, ) ) df = pd.DataFrame(result) # seperate the best of hyperparamters. def Best_coffs(df): # df['w1'], df['w2'], df['w3'], df['w4'], df['w5'], df['w6'], df['w7'], df['w8'], df['w9'] = zip(*df[0]) # Unzip ( df["w1"], df["w2"], df["w3"], df["w4"], df["w5"], df["w6"], df["w7"], df["w8"], df["w9"], df["w10"], df["w11"], ) = zip( *df[0] ) # Unzip cols = [0] # Drop the first column df.drop(df.columns[cols], axis=1, inplace=True) # Drop the first column df.columns.values[0] = "Fitness" # name the first column as Fitness best_coff = df.iloc[ len(df) - 1, 1: ] # insert the best coefficients into the best_coff return best_coff Best_coffs(df) def Plot_DEA_Evolution(df): data_ncol = len(df.columns) # number of paramters fig = plt.figure( figsize=(20, 15) ) # you may change these to change the distance between plots. 
for i in range(1, (data_ncol + 1)): if i < (data_ncol): plt.subplot(3, 4, i) plt.plot(df["w{}".format(i)], "bo", markersize=4) plt.xlabel("Iteration") plt.ylabel("w{}".format(i)) plt.grid(True) else: plt.subplot(3, 4, data_ncol) plt.plot(df["Fitness"], "red", markersize=4) plt.xlabel("Iteration") plt.ylabel("Fitness") plt.grid(True) plt.show() # ## Visualization of searching progress Plot_DEA_Evolution(df) df = pd.DataFrame(result) def Best_coffs(df): # df['learning_rate'],df['n_estimators'], df['max_depth'],df['min_child_weight'], df['gamma'],df['subsample'], df['colsample_bytree'],df['nthread'], df['scale_pos_weight'] = zip(*df[0]) # Unzip ( df["eta"], df["max_leaves"], df["max_depth"], df["subsample"], df["colsample_bytree"], df["colsample_bylevel"], df["min_child_weight"], df["alpha"], df["scale_pos_weight"], df["nthread"], df["random_state"], ) = zip( *df[0] ) # Unzip cols = [0] # Drop the first column df.drop(df.columns[cols], axis=1, inplace=True) # Drop the first column df.columns.values[0] = "Fitness" # name the first column as Fitness best_Parameters = df.iloc[ len(df) - 1, 1: ] # insert the best coefficients into the best_coff return best_Parameters def print_hyper_parameters(df): """best_Parameters = Best_coffs(df) best_Parameters[1] = round(best_Parameters[1]) best_Parameters[2] = round(best_Parameters[2]) best_Parameters[3] = round(best_Parameters[3]) best_Parameters[7] = round(best_Parameters[7]) best_Parameters[8] = round(best_Parameters[8])""" best_Parameters = Best_coffs(df) best_Parameters[1] = round(best_Parameters[1]) best_Parameters[2] = round(best_Parameters[2]) best_Parameters[6] = round(best_Parameters[6]) best_Parameters[7] = round(best_Parameters[7]) best_Parameters[8] = round(best_Parameters[8]) best_Parameters[9] = round(best_Parameters[9]) best_Parameters[10] = round(best_Parameters[10]) print(best_Parameters) print_hyper_parameters(df) # ## Train XGBoost using best hyperparamters def xgb2(X_training, y_training, X_valid, y_valid, w): w[1] = round(w[1]) w[2] = round(w[2]) w[6] = round(w[6]) w[7] = round(w[7]) w[8] = round(w[8]) w[9] = round(w[9]) w[10] = round(w[10]) params = { "eta": w[0], # 0.3, "tree_method": "hist", "grow_policy": "lossguide", "max_leaves": w[1], # 1400, "max_depth": w[2], # 0, "subsample": w[3], # 0.9, "colsample_bytree": w[4], # 0.7, "colsample_bylevel": w[5], # 0.7, "min_child_weight": w[6], # 0, "alpha": w[7], # 4, "objective": "binary:logistic", "scale_pos_weight": w[8], # 9, "eval_metric": "auc", "nthread": w[9], # 8, "random_state": w[10], # 99, "silent": True, } dtrain = xgb.DMatrix(X_training, y_training) dvalid = xgb.DMatrix(X_valid, y_valid) watchlist = [(dtrain, "train"), (dvalid, "valid")] model = xgb.train( params, dtrain, 1000, watchlist, maximize=True, early_stopping_rounds=25, verbose_eval=5, ) # make predictions for test data X_valid = xgb.DMatrix(X_valid) y_pred = model.predict(X_valid, ntree_limit=model.best_ntree_limit) predictions = [round(value) for value in y_pred] # evaluate predictions accuracy = accuracy_score(y_valid, predictions) # print("Accuracy: %.2f%%" % (accuracy * 100.0)) return model, accuracy df = pd.DataFrame(result) w = list(Best_coffs(df)) Trained_Model, accuracy = xgb2(X_training, y_training, X_valid, y_valid, w) # ## Read Test data, edit it, Fit, and Predict using hyperparameters and XGBoost address_test = "../input/talkingdata-adtracking-fraud-detection/test.csv" df_test = pd.read_csv(address_test, parse_dates=["click_time"]) df_test.head() # Lets drop click_time and attributed_time for sack of 
simplicity df_test.drop(labels=["click_time", "click_id"], axis=1, inplace=True) df_test.head() def predict_And_Submit_using_xgb(df, Trained_Model): Num_of_line = 100 print(Num_of_line * "=") # sub = pd.DataFrame() # sub['click_id'] = df['click_id'].astype('int') # df['clicks_by_ip'] = df['clicks_by_ip'].astype('uint16') data_to_submit = pd.DataFrame() data_to_submit["click_id"] = range(0, len(df)) dtest = xgb.DMatrix(df) del df predict = Trained_Model.predict(dtest, ntree_limit=Trained_Model.best_ntree_limit) data_to_submit["is_attributed"] = predict print(Num_of_line * "=") print("data_to_submit = \n", data_to_submit.head(5)) pyplot.hist(data_to_submit["is_attributed"], log=True) # data_to_submit.to_csv('Amin_csv_to_submit.csv', index = False) return data_to_submit data_to_submit = predict_And_Submit_using_xgb(df_test, Trained_Model) data_to_submit2 = pd.DataFrame() data_to_submit2["is_attributed"] = [ 0 if i < 0.1 else 1 for i in data_to_submit["is_attributed"] ] pyplot.hist(data_to_submit2["is_attributed"], log=True) data_to_submit2 = pd.DataFrame() data_to_submit2["is_attributed"] = [ 0 if i < 0.2 else 1 for i in data_to_submit["is_attributed"] ] pyplot.hist(data_to_submit2["is_attributed"], log=True) data_to_submit2 = pd.DataFrame() data_to_submit2["is_attributed"] = [ 0 if i < 0.3 else 1 for i in data_to_submit["is_attributed"] ] pyplot.hist(data_to_submit2["is_attributed"], log=True) data_to_submit2 = pd.DataFrame() data_to_submit2["is_attributed"] = [ 0 if i < 0.4 else 1 for i in data_to_submit["is_attributed"] ] pyplot.hist(data_to_submit2["is_attributed"], log=True) data_to_submit.to_csv("Amin_csv_to_submit.csv", index=False)
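# The four nearly identical blocks above binarise the predicted probabilities one threshold at a
# time. A compact sketch of the same sweep, assuming `data_to_submit` from the cell above is in
# scope; the threshold list and the printed summary are illustrative.
for threshold in [0.1, 0.2, 0.3, 0.4]:
    hard_labels = (data_to_submit["is_attributed"] >= threshold).astype(int)
    print("threshold {:.1f}: {} clicks flagged as attributed".format(threshold, hard_labels.sum()))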
import numpy as np  # linear algebra import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # defining path os.path.isfile("/kaggle/input/glass/glass.csv") # importing data Shan = pd.read_csv("/kaggle/input/glass/glass.csv") Shan.tail(10) Shan.describe() missing_values = Shan.isnull() missing_values.head(10) import numpy as np import pandas as pd # to save model import pickle # Import visualization modules import matplotlib.pyplot as plt import seaborn as sns Shan.dtypes sns.pairplot(Shan) Shan.shape mask = np.zeros_like(Shan.corr(), dtype=bool) mask[np.triu_indices_from(mask)] = True f, ax = plt.subplots(figsize=(16, 12)) plt.title("Pearson Correlation Matrix", fontsize=25) sns.heatmap( Shan.corr(), linewidths=0.25, vmax=0.7, square=True, cmap="BuGn",  # "BuGn_r" to reverse linecolor="w", annot=True, annot_kws={"size": 8}, mask=mask, cbar_kws={"shrink": 0.9}, ) x = Shan[["Al"]] y = Shan["Type"] # Split data into Train and test # Import module to split dataset from sklearn.model_selection import train_test_split # Split data set into training and test sets x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.25, random_state=100 ) # Checking file types created print(x.shape) # print(X_test.head()) # print(y_train.head()) print(y.shape) # Run the model # Import model for fitting from sklearn.linear_model import LogisticRegression # Create instance (i.e. 
object) of LogisticRegression # model = LogisticRegression() # You can try follwoing variation on above model, above is just default one model = LogisticRegression() # Fit the model using the training data # X_train -> parameter supplies the data features # y_train -> parameter supplies the target labels output_model = model.fit(x, y) # output =x_test # output['vehicleTypeId'] = y_test output_model from sklearn import linear_model # Save the model in pickle # Save to file in the current working directory pkl_filename = "pickle_model.pkl" with open(pkl_filename, "wb") as file: pickle.dump(model, file) # Load from file with open(pkl_filename, "rb") as file: pickle_model = pickle.load(file) # Calculate the accuracy score and predict target values score = pickle_model.score(x_test, y_test) # print(score) print("Test score: {0:.2f} %".format(100 * score)) Ypredict = pickle_model.predict(x_test) model.predict(x_train) print(x_test.shape) print(y_test.shape) print(x_test) print(y_test) df = pd.DataFrame({"Actual": y_test, "Predicted": Ypredict.flatten()}) df from sklearn import metrics print("Mean Absolute Error:", metrics.mean_absolute_error(y_test, Ypredict)) print("Mean Squared Error:", metrics.mean_squared_error(y_test, Ypredict)) print("Root Mean Squared Error:", np.sqrt(metrics.mean_squared_error(y_test, Ypredict))) ax = plt.axes() ax.scatter(x, y) plt.title("Input Data and regression line ") ax.plot(x_test, Ypredict, color="Red") ax.set_xlabel("x") ax.set_ylabel("y") ax.axis("tight") plt.show() from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score predictions = model.predict(x_test) # print("",classification_report(y_test, predictions)) # print("confusion_matrix",confusion_matrix(y_test, predictions)) # print("accuracy_score",accuracy_score(y_test, predictions)) ##**Accuracy is a classification metric. You can't use it with a regression. See the documentation for info on the various metrics. # For regression problems you can use: R2 Score, MSE (Mean Squared Error), RMSE (Root Mean Squared Error). 
# print("Score",score(y_test, X_test)) # score(self, X, y, sample_weight=None) ## setting plot style plt.style.use("fivethirtyeight") ## plotting residual errors in training data plt.scatter( model.predict(x_train), model.predict(x_train) - y_train, color="green", s=1, label="Train data", linewidth=5, ) ## plotting residual errors in test data plt.scatter( model.predict(x_test), model.predict(x_test) - y_test, color="blue", s=1, label="Test data", linewidth=4, ) ## plotting line for zero residual error plt.hlines(y=0, xmin=0, xmax=4, linewidth=2) ## plotting legend plt.legend(loc="upper right") ## plot title plt.title("Residual errors") ## function to show plot plt.show() from sklearn.ensemble import RandomForestRegressor rf_regressor = RandomForestRegressor(n_estimators=28, random_state=0) from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import explained_variance_score rf_regressor.fit(x_train, y_train) rf_regressor.score(x_test, y_test) rf_pred = rf_regressor.predict(x_test) rf_score = rf_regressor.score(x_test, y_test) expl_rf = explained_variance_score(rf_pred, y_test) print( "Random Forest regression Model Score is", round(rf_regressor.score(x_test, y_test) * 100), ) # Split data into 'X' features and 'y' target label sets X1 = Shan[["RI", "Na", "Mg", "Al", "Si", "K", "Ca"]] y1 = Shan["Type"] from sklearn.model_selection import train_test_split # Split data set into training and test sets X1_train, X1_test, y1_train, y1_test = train_test_split( X1, y1, test_size=0.25, random_state=100 ) # Run the model # Import model for fitting from sklearn.linear_model import LogisticRegression # Create instance (i.e. object) of LogisticRegression # model = LogisticRegression() # You can try follwoing variation on above model, above is just default one model = LogisticRegression() # Fit the model using the training data # X_train -> parameter supplies the data features # y_train -> parameter supplies the target labels output_model = model.fit(x, y) # output =x_test # output['vehicleTypeId'] = y_test output_model model = LogisticRegression() output_model = model.fit(X1_train, y1_train) output_model pkl_filename = "pickle_model.pkl" with open(pkl_filename, "wb") as file: pickle.dump(model, file) # Load from file with open(pkl_filename, "rb") as file: pickle_model = pickle.load(file) # Calculate the accuracy score and predict target values score = pickle_model.score(X1_test, y1_test) print("Test score: {0:.2f} %".format(100 * score)) Ypredict = pickle_model.predict(X1_test) df = pd.DataFrame({"Actual": y1_test, "Predicted": Ypredict.flatten()}) df
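# Glass "Type" is a categorical label, so classification metrics are the natural way to judge the
# multi-feature model above. A minimal sketch, assuming `pickle_model`, `X1_test`, `y1_test` and
# `Ypredict` from the cell above are still in scope:
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

print("Accuracy:", accuracy_score(y1_test, Ypredict))
print(confusion_matrix(y1_test, Ypredict))
print(classification_report(y1_test, Ypredict))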
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. df = pd.read_csv("../input/housesalesprediction/kc_house_data.csv") df.head() df.keys() # Check null values df.isnull().sum() # Overview of dataset df.describe().transpose() # Since it is a continuous label, I would like to see a histogram/ distribution of the label plt.figure(figsize=(10, 6)) sns.distplot(df["price"]) # Looks like our houses are falling between 0 and ~1.5 million dollars. We might want to drop the outliers (expensive houses) if they are just a few points. We can then build a model that realistically predict the house price 0-2 million dollars. Since there are not many in the market that are that expensive, it may not be useful for our model to train on these outliers. # **Analyses of different features** # Categorical - Bedrooms plt.figure(figsize=(10, 6)) sns.countplot(df["bedrooms"]) df.corr().price.sort_values() # Square feet living space has a high correlation to the house price # Exploring highly correlated features with the label through SCATTERPLOT plt.figure(figsize=(10, 6)) sns.scatterplot(x="price", y="sqft_living", data=df) # Boxplot of no. of bedrooms and the price plt.figure(figsize=(10, 6)) sns.boxplot(x="bedrooms", y="price", data=df) # There is quite a bit of variation in 3~7 bedrooms , which makes sense because in the countplot from before, majority of the houses have 3~7 bedrooms, meaning there is a large variety of prices. # **Geographical Properties** # Longitude vs. Price plt.figure(figsize=(12, 8)) sns.scatterplot(x="price", y="long", data=df) # Latitude vs. Price plt.figure(figsize=(12, 8)) sns.scatterplot(x="price", y="lat", data=df) # Looking at both Lat and Long with a hue of Price plt.figure(figsize=(12, 8)) sns.scatterplot(x="long", y="lat", hue="price", data=df) # Image of King County in U.S. 
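# The note above suggests dropping the most expensive houses before the geographic plot. A minimal
# sketch, assuming `df` from the cell above is in scope; the 1% cut-off and the colour palette are
# illustrative choices.
non_top_1_percent = df[df["price"] < df["price"].quantile(0.99)]
plt.figure(figsize=(12, 8))
sns.scatterplot(
    x="long", y="lat", hue="price", palette="RdYlGn", alpha=0.3, data=non_top_1_percent
)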
# [base64-embedded image of the King County map omitted]
myCvJKBcWKSRjuypXKGEtbd2ZTqDPuTqGtXfvkZ+YYsH3+Tb6XNgS0stnmjBkBbN/BUr0JkA6WI3o4wpgeDQp05uJnqsJ9zKAf8o8yendswrPPvoDte/oAlx+LFi/HiatWIOSpiSUecrv/9w5IDu7B039ej5df3QeD3YclK9dizaol8EjqlNbf4Z5tePSx9XitJ4zAtFlYu3Ydls4epVsdc1XlNP78+NMw1pGdbCxrXykbw/NPr8fTL26BYnRiyfEnYc3xS+GbBNE9pt1jP/5hR+CQhHs6XUFPogTTKDDv7zIwtNo5SbFu+1FZDEZUlCxclTgCAdsYwTR6PlXSZrqmu8Qa46TLmffV1+JID5P+cOyiT8heO+ktSVBAEs7RiVqOH3vIyGQus7m4RXRVADBWisiVPCiUG6UEngGjRBUiFulCVvOo9zE1kVXORhvXBejMGQEkkgUMDGjlDOlG161WLVZVlSaHMMw8Ddug+352vQ2s8CYV4kZPP+E3vX88FY3BUlkDKlLQ632a8MAJVlIpSOWBij2I41Z1oqnRpTFvTbSvARJHzqTymFZnHjNOE+w+ZhWFMxW9SKwEp9cLm92EyFACiVQOOQVoag0I6E2/tjEHV38wPEK8A4tIUHjSFa17WMbvz3Y0q11ztx+oXb4fzY1ueU927YwJ+LStzat5DWqehfHnONhvhhWcLjOS6QIay+7X7REo54Zx9x3fwl33/ha7B2JIZ7JQjWZ4PQGsOPVd+NyVn8XKzlEk9sH69X95e8+GP+Nrt9yMB596BRlFy6Jx+Ntw9gWX4guffr+UWB3Y/gyuv/pq/PyJDVCFddCE9vmn4NpbbsI713SNu7wynvzFHbjocz/FWy+9fqxwV2L47+/dilv+4ycYTBclR97mnob3XXwlrv7s+xD4x9KZxo3LsZ8HGgEdT3WgfUa2Dacq6M1U4PAYhZFuZMNR/EIhWFRUse7IK39QM+Z19IUxaxjNyFbckuPNc04kfCmExL1cLfHKHG+mwU226JMxJ/SMYsRQ0iRW/HDGhGjGhGTOOOYvU2Bcmo5rIypGCyoGFkmwyZ9qssFkUlBKxlHIjKWB5dzN/jK/NZZinrpmVdb2S7fgyDRGFyu5zUUwVy1Ertu5O47BcFqszvHXz9+8DioBY/+IDNdSr7L5sfXsef6qDjSiuIxvlzswtMECTnbLobvk2R6VCVjdWLK8Q/jaJ7PaNeFcQXQ4C4tRhcU6+b2rHTv9u34/eVQwZMfKFc2Yu6AFDp8P7Z0NmDe3Xixofaz14/RPCnG69AmC43iz6Mp+46HvzEddVYWXQFN2Dt5XegwY8585M4C9e+Po60uNIPxrmp3yV+mvyYjGepekUBKZrymBU25C27GUwt3fvhFfuPlHsHadgtt+eDcee+IJPPbQ/bj2kndh2yN34orP34ptg+n9GyYF6f5rJVtkVPUl9kf3j0y4s5bxMm5TRdV448esljBUbQrl6NapAArVwjDu/fF38LuNSVx08/fx2BN/xKO/+yXe/8bp+J87bsN9j78MtZLDL773LTz8chpX3fYTrF+/HvfdcSPckefx79++E2E93VStIB7eg3u//xVcdtVX8WrPIOzOsWllG5+8H9/74a8x54yL8OAjT+APj9yP89e14pd3fg8PP71jtPPHvv3TjcCULXcakvuGSyg7VZnM/54jRcud8VkKd32CPaLnFw52SZrW8rEP0DjPzz+Zaqs50wfYXTYRGc6Fx5FnneC8RE6rbU73PScvWvY5RXNJe7x2KM5mwGRDKOiQ0oUU+DPrHOhObkCmvxH2WdNgMBS18ZDStSqMgWZs2duClr17sWiBVRD3tVYhXcIEzbH4x4YNgxgcTIMFU1joYmAwg6FwUpDdQS/dy/RgjE6rVDqSmTIM7HDNrErhxFh5Is1JmFcxmqvKw5kvXyhqQoxjRuWISoK+8ButdjbJVLnRM+p7HOSTk7FqgNFig9tjE+E52TzPvpJmNhHPweXQ2Pj0EDK3jRW0GpJfzi7d5bXRPWKA3aoB3agsdHb4hW+dqXCa12HyK+B4UDgyLDI4SGKhiuTka+wHo9epPSeAy022PFXi7uRnn7xl7VgeR0WgtdWDyHAOkaEsaL0z9a/2Xo6e6cDf2B7HJBRy4LWdMezeE8f8+XWHbL3vfu4h3P69e3Hcuz+PH37tUjR7RqFPS5evRJPPia/97Hm8uKkHcxu1MqDJSDf++Nij+OtzG1HxNWP1ujfh1JOWwVWlqt301CN4ZmcSy5fNxe6X1uMvL+1BY9divOWsN2NBZ5OAq4rZKJ7+42N44qm/Ylix47hVa3HGqevQGqLrUcELv78fL3UbceY5Z2F6NSc6smczHnzsz2hbfgpOWz6HJYSwe9Nz+O3Dj2PHvgjqZyzGGaefgZUL2kcyTWpHsVwsYvpxp+HmMxbg3W85cQTk1eWrYMumy/Hylu2IHGfBH5/ejlXnfgqf/fBZ2j4LFiA/sBNf/PHTeHrHEN6xuAFKvBu3ffkGPL4jgTe8+R0wPPg8imMe7hz++scnUAgtwL9ecyXWdmgZ1u1XVbDj4ivxp2eexTvWzdKoSms7eez7P8UITFm4k3J1V6QMZ5BpWkcHsT7RiBvNQD5DKWqAxXb45Bz7nYOyymBCJRNHk3MIoTq7CIvxk+H4yV8XBLIf89snWHQRxjiy1aFZFpRrJG5hlbeAi5Suo2NJDD4FCIVEImuEK+jG7C4/0ukiojkzTji+Cb7tebyQi6NUmibViEZOq6owk0/W34RkaqfM8kyn4wStL/pk7ffZ8WpmEOGeLFxOs1jNBG61NVoE8BWJKtjbm5P0sPoAlQQgnVYwHMnDQStdb7BqmbPdaIqCyqspPlVh6LAaYXPYoZRNIhC4X0kpwW5S4LCOxtYpYEVA87jaxmvOM9lXilyzWRWWpx07ImhqcGh89LWVFKoHU7AWimUkElk0+PUKe5rCQeU1lihCqfIDMA2Rd4NAQca0KcitVqNkHCQzFdS30aVP6uDKiHU8mcU+pu/kQLDQYXagZ1lTk8jolSoUNQt/il4rPo/sV2ODC1u3RtA/kEZriwfF4iEObLXT5Hmgp2HOnCC2bRvGli3DmDcnKErKeAT/mOsc+VHEM4+vx95cIy774LvGCHZtFyNOfd+nMfukQXhCWrw5ue8V3HDNNfj5+m3w+z0wlPO452e/wDkXfg7XX/5euM3AtqcfxZdvuxf+zhnwmStIJWLo/tlP8cvfv4jv3v4VrGwD7r79FvzH3Q8DZofUZHjgl/fhgTd9CP/2tSvR4SvjuYfuwQ+eMmHeG88YEe6DuzbiB3d8Byd9sg2nL5+DV//8K1x9023Y3J+Dhx6Xh36L/7nvd7juK1/Be9aNrUfO6zG5GvCuD3xUnpeRIQCQzxGXY0FDwINkpA8DZS/OnD93RPhz366FC+Gz/AU7tg8AixugwoxFJ/4LzrhwJdozL2Pjoy9I5sVIu/kodvXEEGxbjBnNI9QpCLS0Ydb0emzcuw8xkkK59Zlo5MhjX/4JRmDKwj2WUjGYL8Nu13Kja4y3ozZMtNQLG
SAVFewNrDYtPnskT8ja7bl4Apa+VzCvqwinywWmsjEWyleCkzoXTtwUQiJ/xKKhqTdxT3TBTyuKxxA/pa/jMSJwq22JISzBEZaq0RcJtsJmMcBiAJw20tzmEYsX0FRvg7Mng7KiwML66jSbqy5cKgpOvwXFNDUGUrDq7Y1+8jpYHpWMa357Seq10xjQkOsGFBSWaVXRP8Q8a4MUcpELVUtwGjNil1Ps1S68Nho+2aQFyYwDXjfJW0hfa8LsmX5RHqiwUN729maQi0fhsJCKl8KR1r1B3OQ1Bn1t8wf9zjG0IY9SNidKoADWat0LvI/spMGIeCwHlbWimX8nnhTy5lfQGy5KGmAgZJN4vVIgTkKFx0vkOxH1msuG1rY9oEpeOceYqw9VI+F18xgKzgP5KooKAZnEIoxVqA42ILx/5K7v9dJDkEFDnVNIdqakfEzQOI9j9gHfBQr3LRUV8+aGNPDgBErUmCbKaezoCcPU3oV57ROzp5ltHnR2VfnDlTju/NZteHRTEdf9+49w+so5QH4ID/3X93HHf/0H5ixdgo+/aZ6QISUjg+h68wdx7ac/gDavEY/8z+24+faHsf6596MTJtx5929Rv+zd+MYXP4FGZxlPPXQP7ntyJzbviaJjSUBwCbwT2v3Qeq3/NpkZrC7gt/f8DC/02PGV27+L0xZOQ/i1F/DdH9yDlzdswTtOmosafJw0oPFTjBkBqOkB3Pmj/0LE3YW3n3kiKr2/hwIH6j1jGesIiKWHJjuYEB3XFmjFue89RxrreVaz2kfmEQJl82nE0yV46kIYg0e02+B0upEbyiCfK0O0obFdOvbrn2AEpiTclYKKV7qLqHhVmfBFOB3lwWHOMwlyendohSrqWqqS6vUZIBP2lrmR5aKC1Ma/wD34IrLNjejtswgFKSdxnooIc7pch2IlGJAVtjEKbYfNiFBwlCNaPwG3UVhxzssqrC1ukDQ4XXDx5SSjXb5sQkZRxTLkeWSCEYWCRVsqQuqSYUETpQyboKuZvlXA9DYbnJUYMiTZd3FCrKpZVTyA0R1Ed7gOPb0xtHcEUClTMIwOGlUWFkyhI5yTNe+lbK0AFGfEELS3OmTiZsEc/V6z/+TPJgpfE2j6FWufVrOK4WxeGOoo1LmwfY/LLLoBBRSt4jAVthplR/Lei0wn1Hj39fONbf3gvyjgeZ2pLNMqtIwBOUqwAiyuU8TQcAavbR+Cy6FR5vK6KfRJkZsrqpjf7sesGQEUlLKEQsRbRO+D1STCXYSxeBkIhKP+NDquB++htgcPoaJDZYoxbM+YWXlsK8yLL6WLr4v2lV4Ki5XXPUqLPLb1qf9in3ntFPBsd9OmIWx/LYZZXf5qlbgDjYO2jeVSzbxJB1nSg9vw6F82Itj5RsxsdKB/326QFrZr4QKY//sBPPLw0/jwm+aJu6uh7Xh86pJLsW6FVrTk7HeejQce+hO6B3qh2hejs8GHJ595FD/8oQNveMNqzF93Nm45zYr6JhJ/5ASYypBUba8onLlOC4KY0DStDYg9hHt++CPET3sDVq1aiMu/dAPsnuCUuOBLiT5896ZrcM9T/fjItV/FiZ112LRTo4DWDQd9SJi1w76QCpujVtsv4inGL6SgZi0FllTV5xdtH5aONkI9QnUGxp/32O//P0ZgSsI9m1exd6gI1GsxQ/UA2JUjcdm6YO/ZXkYupaJtngkW+1EgzKFwq5RgyoQRdBTQszuC7t1RGC0slkJXbxGdM0NwO20ipBx2zc2dU8pIpsi7bhmj9RMsRYEezxhht2hSky8dhbk+/fGFZfyanMJmlwflMgUtU8FUJJNFGNSSaO9KqYJiQhEB4ApYxPrPpAtizftNeWTLRMvXvv6S8wZbwI9EcBGe2rgeZziSCDX6RlyyVCDYj3A4LUUybFYSuGh3TO8fJ3LOI9pndS1DFxUgr0AjnNG9DzU3mz0xoYg0Gc3qbcIDwGOICOc2KjWkpiVzm9NESl8tyE0wHRUhGa+a9g7lK8/AegPpeArPP7NT4//nBVSFt5HWUEZBNBKTin3HLa6rKnAqs+6lulrIW8FwOI32Vo9kG1joNqoKcApI9p9/FMwy9VdrfB9KP7V9NYQ8J/bJ6mCz6zyPw2HB4EBWngGS2kzd8tZCHsy3J10tJ3mz3TiS918dmkPuOseBGJCF8+uwcesQ/H4rpjV7NEDjZK2ZnGgLeFDsew290RwWNe1P4q0Wc9ixfQfM/ibYo8PIlfPo2f4M/u3WnfJ+aP0tw906H4tmNMjzxOfKOa0JPv8owIxVaM1WEzL5LPzNs3HtjdegeP2t+OWPb8NPv3cr3E0dOOOt78dln/kYFk23a960cQ44o4m1JBh+4ftlxrmf/DzCCQU/uPc3+NJDdwN2FxYffyo+ftElOPvMlftZ7rXDMNy9Abd//Wv4zXMRfPjKG3DhO1bJZrONvG4lZKmg1yzlfFYUTZvPNWE8v2ZX+WqyOuF0sERyBgVWVNVn86ICJZ+H1WOH5QAFdMa3d+z3P9YI6I/DAa/K6zZiRYcNj+7OQfGR3OLoEdjQomFRGAr2bBIi2D1BsqsdsIuHvFFc1mQWS5thNPvg93uF5GWgn+VDDWgIWdEUMGLXjjDKMKMxQKY2u5i5iVQR/WFF4uNabFab+Dn5U1BZzCqCbk0DomCrtXS5D0uhFs0GzJnlFyHIySueKmHHjiichrwIOlq5OdY2FuFrgN9rRixbEJCUy1SBUWXxmrHCnZarERV4O9oRzi/Fs5tfwhucOSklS8uL52YqF5UEK7nwzcYR4a4PIIUOyWgYd3e7zGKpVRivNZpRMLhQyipw24pSLGaMkDAYBO2eTGaRzbmEn10XRrT2lCLQN5DXUOIWE5gZwLGicGcpXP4dzkIvu0lNohBOQmrm1QwN+0kgo0OtQDV6pNCKfi5uY859wGPGYCSJ7n0uzO4KCnCplqRJv1bt8/X3lcdz3J0Oi9T2phbF+6K3r/eLn2SFY0w/lytKidfabQf6rrdFS3tfTwrbd0YFQW+3mavKqCb8tfj8gVrafxufx0DAIZUACwXGc/bfZ+waO5aftBL+nz6CXz3wJ5wy/1yMz87a9fwjuOSyr2H5h6/GZW8Kwmr1YM3pF+AbV5wLEyuFkUTKbIaJeAW7W2SYZESYNSCmfj65HimTrCIS6Ufa1Ylb7vo1Mnu24pnnXsArLz+DX//XtxAu23HPNz8Gc9VzZSbtZXXJ59NIZjJgtcVypYA9e4dx2ke/iPd96nN4/uln8LeXN+DRB3+NL10zAP+M/8Fb5gb1Q2s+VWx64lf42je/hz57J758xx1487LRipvBUCMCahp7+vprjgGGu/cgUVDRNHPywi21BxicfrQ1evDCzh5EMpCqhtxeiEXROxBG3ZIQvMfi7bVD9k/1fYKo7P7XT0t6cYcZbVYzsvGqG3f/3Q5/TVWLjvZVkE0A7fOM8NYdHcFegQmDu0qIv7IDcxuyWH58O+qbQ0ikNcvK5TShsd6GrjY7Gv1A0KvpQaRVpSWoCXKNUjWdNyKZNQoILpOnda9xy3MfTn60wkb+OJFD
SycjbSorqlkI9CqUYDEosJkrWqlXWp0sa2rUiEjYH0OlJJav3ZpHJash28dPrnTVUVB6Zi3E7lIXtm3PiHeC7VAk8ZOc57R8apUO/eaxy1lJpTOgrckuzbPiG4VR54w6GBxeDCat4o3QuO+rRzIbjUNUVhCNKyiRdbdq7YajCjZtjSIxHEfQWYTDqiHjaa2zr3Icm3n9MlM6QS+JxULlqubPpPWLCqmJAsJMN+xYiURhyDALFSNWzdMUKn1EjuwnBRDZBAlSI0hRKzyjFa2h0B/5M2llXGm98z4d6tDwGljfvr3Ni3SmiI0bI3j5lUHseC0qRWYYdpJnwaQBZKd6lfT+DEeJbYCUm9UViQMdv+ANb8O5Zy7GL//jBtz0nXvRHY4LMKxYyGLLs7/Dl754I7YXPFi7egUaps/FcR1+PPvQr/DstkEE6urR2FCH2O6/4d++8U08+uJeORXHbcJBUcm5YEMuvAs3fvZSfO2Hj2Da8nX46Gcux43XfBbHzwygP9wPRTXDGwpiqH8zXnhpM5RSCbGe7fj9gw9h72ASNpsdZpTx0J3fwCX/+mXsKdbjre/9sOShf+Qda1FMRRBO5Ca4bBWb1/8cV111K6K+Ffj6N28VwU5PgKIo8t7Vtc7GcTMcePy+n+KPG/cJ58HQnhfwo7vuh+KdhVVzxlZvm+Ak2iqjF0uXL0Zyx1/ws5/9BomcglI+id/8/G48vT2FZYuWgbjRY8s/5whMyXLn0DhcRiyZbsGDO0oo+jlhHgjt+/oGk++rklcRHVRR12qEJ2BAWauu+voanOAoCjalZEZkZxHYvQXHN2/CicssCNV5UCgZMW+WH43+itTH5gTpcVukGAqbYgw6mysjV6hgKJoX1DWFMmPy+aImwItFoqppQWsChJOy/NXMzkSG0wrhxEjrlueh+9Sgau5fri+TmlRi0xqQi4LUBBUFpQiv3wFbMoOSUobZRmVibDxOLZVhocRsXYintyZhMQ9i8aIAqNCQ5SLgd6CPx5MtjSlWNX2j9U+AHUMQDocJjPvzuuuCFjTVWeB2erGnx4LYcBqqIQenWcMmsAkKC4e5hEiYIC47fB4TMtkydu5NIzaURNClVuP2msXH8zLmzbi7DNJYmTvB3TvYqmoDte1Uv2tjrSlm42S7XD+fPbruSU4AACAASURBVJ/bhN7uKP4KYPGiBiGpYS76kVzYDwpwCnci2flHgUlkPClnteeBSg+pastCuENSHsFkTCjNJu4dlQgqVzNnMFXPLQI5mSpI7JylZHfvSQgHPoF3vM98hvWFXp6JFnq7+Mzs2ZuAy2NFXZ1T0vkm2rd2ndndikuuuQkDyS/gR1/+HB762e0Ieh1QSwrCvfuguDtwxQ034E1Lm+XWfOSSi7DpiuvwqfPfiW9Ob4HdWEZ/fy+sbavwhvdrwDslz8I/WRGW+rnUcgmZdBLJTB7TZi3GGWtm4eY7rsXzD/9IzpdLDSOOZlxwzlnwmy1YtvqN6Lz7YdxwyXvw89s74fbUoU4tojXoRz6TBoxOnPovZ+LXV3wF5511Oma01sOsKugfHMbqt34Ap87f38LOh7fhjttuxW+f+xs6s0Zc+5nN4iInaVGpbMbat30cV116Lt7/8Q/iz5feiA+dcxZmdzQg0bcX++IOfOamD2HuBCVxK6Ui0qk0cuJ/16/YgONPfztOe+xJ/PjLl+DJ+2bBZczh1a3dWHzGR/GOU5ePf9T1A499/hOMgOn666+/PpvNilYZqKk9O9G1OywG7B0oI2WqwOacRHOe6MAprqOHIDagopAD6tuNMFlG08Sm2MTku3H2NllRLFSQ3N0HW89WnNCxEyuWmuDyODUAXKaIWCwDl41I81FLlxNuOFrEzu4sBvoTSCWy4ip1GTPw2IrwOLQqZ64qKCxfpEWugeJYb5zmOy13yhn+MRXOZLGhocoBzgk/HleQy+TEctcFXqFig9XO1Dxa0yrSuQryBRUulw3hvB9Fb5OApsZIZ30E1ApsHifSJQ8iewYRcmYQ8NukOguBeelkBnarhsTWD9G8CyoyWQWZdFE6y5ru7HRjyCYgO5vFiFDAhmwBKGRzsJuq8Whem+gjqoQdXB47vG4LBsMKSM06rdkNs82ORIahwdJI2VwqVKWSUcac5+eiW/wTflbHUB/Lg31Ke1SwVBWpggnegAf1of2BkLxLDrsJxYICVrBrbvEJ6x7vzZFeeH/pAervT0uYpVwqS3nVLVuHMRzLI5FUEBnKYTCcFeWnrd0ryoAe5jiU/rD/VBqYHcF4eSjggM9vk2yGyFBG2ApZ4jiTLSIay4uwZjaF4DPGXTsVweHhHPr602hr9UilwbHpcDyA+IcmmC1j2eZcwWlYt+5kzO9sQioaQU9/GEWrD6e+7TxcefUX8LaTF42EjENt87B69TIpc9y9aw/yJi/W/ct5uPqqz+LEqlUbG+xBwV6Pk9etQbNX41YoZlMYGE6ic9EqnLBkHpatPB4d9U4M7N2D/kgK0xeuxYWXXobzzlglaaR10zoxf04nlKE+DMRLWPPW8/CBt58Ct92IjoUrsGhWK5pmLsCKJbOgDPdh775+GNzNeNv7PobLLj4f0wOj6Wf6PUkN7MErG7bD4G1A0EXvTEbAnMViUbAvrbOWYfXxi9ExdwGWzJ+O1HA/9vaE0TBzFT59xZW44Ox1sE0APCym4+iL5LBw7clYPrdVPx2snnosWbYEIUcFO3ftRU714KzzP4nPX/YxzGn5367ZNtLNY1/+jiOQz+dht9thUFVVjUQiSKfT6OwcjQtN1BcC6f7wSh5/jRcQ7Kiy1I2bACY6birrOJFzItq9sUzMClq6tFSlw51cZZISVHwZ+WgUub07Yez5G5bMrOCEE1pgd9jFmiHNZv9gFls29gonPOlUBVleKGNgqCDpUqWyikomJq7nVMGCek8JRInTEudC/SGRNSFdtkuMt1Qsw+WyoFIqwWrIw2Hh1KcimTPA5PAKMMlu09LBXtudRjw8BI9do5DMlUyw+JvQ1tkkEzutcwJ9IpE0PE4TtiWakGo/Ce66AGixTLTQkibL3eDmPWiKPo13nWqF1+/Exs0R9O8Jo86vuYE5xiJYVXokytgzaMZQTIHFUIARebjdVsyb6YHHbRY6VHo/duxOY6AvBoe5rB3LDlSR5LmiGQ6PG8GATSz35kYb6oM2pDNlvLojCjWfgNuuPTgMacQyRvgcLIs69io4VnwuRCNi2hlhDIJ+11aN2VtCOtX8wpoNnCcZb0/mjShZfJjT5RePwhgukOr+NF7lul0eLF7aAruVcdcj84DzMvSWRGkxGrDt1WHksiXMnxfC8y8MgIjopYsb4HJZ5TufP50c53DfA16ipizRDc+W6QUqIxLJYmhYcy/TC5UvlDFndkgofEl9W3v9fEfo1o/F8li0sH7E0zA63LT4DbA7l8LmmD26ety3SrkkHiiD0Qx7bRGEcfvxJ4FhqtEEgj9rF7bB99FssYwgxfmOlEolSIWwGhQ8M2IIUKWiPIHcFHCpUiZzoHaOUlERjAmVmZFFLaNQKLLmtJZGObJ
h7Be+i8ViSc9hGbtRahuYBT8gzzWfiUoZ+YICs9Uu4bn9DqiuYLhNrs1kFm/bRPsphQLIlWG3jWIIJtrv2Lp/7BGIx+Pw+/0jyvKUrpZFW9qCZmwYLgolrK1KOzqlgw+yE4VLbFBzUfvqmMoBVCaWWQdpaXSzUWLLRuQjQ6j0b0cT+tDRkoESMIobnXXXpVAZD6nSTpKPhgArToCxeBG94TwsViM6Wh3inh7KUwsxCJqVsXN9wqZg52RDvuz2aX6xjAi8mz7NKRZRNGpGrlCGWiZFqwHlHNDdl8P0aXYBtaUyJWSKRGhrEwpZ7KZ77Zg3LwS3i6h2zYW+eStQzOXR5EohW0yiUglUY8h6T0avn5ae0VKBs6UJ8WgTEvE+hEJ2kMRmTxmS004lhmEBsR57S9g02AWlfj5c7UakwlGYBrfCYQgjmiwJqQ1BSKViBUG/BSZjECwYIwKX0kvSuxiSsCCfZQ34suTJ+6UGPKcdLYVMT7aQQ4yquOWzRSPM4/BZAhBkqKZMFLQNbpdV2lCKegvatfLKCbASF3rNMPB4Qq74R4Wjc5obWj36mp1Gh0vuJcfsSAhSvVkqWPwbmcx5aoIurUYByWWrNdgZhzfy+umitzJQWg2XMERzhBQMXhc9GOwA+0NhPW2aB83N1RxzVcXu3XFs3hxBLJ6X8rQMHwiJj0HDhdDCp8Kq16nXr/NQPolIdzjGaXKTNECBPNHCNmSYajYyjc1SA47TN5ksVlGs9d/jP5lqVwsqrwXYjexrMEmYZuT3JF8M0q+pXRuboCLicIwi/idpVlLbLNYDC22rbZQdcrJ2jq3/5xmBqT+F1TFx2ACbCaASe6QURE40FDDxSAU2B+AOGKAepmA3MP0pVUFu1xa0q1sxt6WA1iYrgnX1GIoWsHXbsAipkVstfaDlp4ri0jeQQzpbkkIoDXU2SY8ig5lMjmKhUqCQwpQtqGBCG0FkXEHwG9HXSskKurHdTqcwp3X35cUdTaAekcbJdAm7unNIpIviAu7qCo0Iasa6zUxVK5ehVkwalzb7WGY81oiGgAH7ijlBU5PefjKBRMIWh8uMlK8Rz23aA5MpAa/bKtW+hmNpSQPjPX21p4SXYgtgXXQymjv8sLsNyKSMSLzcgnrLU1AK3egdLIwUpaECRLCXxarF3InBsNuMEj4om+xIJ4DpzZYR65ewAE24jIy4fKElRU55f9Vyr70OyjQObzpvgN1tR8d0n+ThMxZdu1BcUQAJ9KBGbrNnjHUWBCNRkjFme7XnqG2HWRIEmpGV7nAXxszpaeFzTUDb8HBGUvToHmdfGXdPp4rI5WlpQiz2ra8O4dUdwzhuWbM8N0UqgtSd5Bk73B6NPV67F9pgae1rIM8ZM/xwOM3Y251ENJrFogUNWmW4ak15WqVmk+UgOf41Fu/Y0x77dWwEjo3A32kEDlm4+90mBOxG7MuXgCOUZsFYeyqqQskDDe2alXM40yv5zwt5E/Y9uxmdynqcfIYfjU0BlFQN3a7QgubsVjtr0qqpqIgn8xgKF8UV3tbsEAY3TlV6TW0RDqSTLBoRzXACr6YVUbgXK7A4aHFptbBJCMOJnALe5aQngKApAxpDVgkFNAQtIjBTOQPaWhyoC2i1xakwxJIllKtV2yjktMlYA+ARXW+0kOAEMHGgeBdrhFrts8N+my0GuLtmoKfXjF//bbdY/QG7F/FkAoVcHA0+AyLZOrgXLkHdvBCM5ZyQ3zA2bjRF4XYD0zobkc3tfxIOIYVTIV9GjnHbTB5mc1Hy8SXVqKZjPHp8CxLnrzLVmSco90prn6xdBPi5XEZYTQaU7RPgPaqMgrXXLt8N9AYZkC+kxZPA87PPtQJev6dU3pgt0VpnqebK79faAVewXTPR+EaDjEVkKI3wQAq9+4YQHkyipFowrb0OdXUOeZ6y2SLq6h2wWs2iZDF1bW93QmLxhXwRjY0eIbmhwnE0F20sNCAlPQftbT5h4Nu0OYJNW4awdFGDhGYqFnpejGDJXKZU0vLfHwdAoGgBlbJeKXH8HT+aVzLFtnVlqfowyDNcXae/Z9qTqu9Ybbf2oZniqUZ2Y5psWWMl5LhpYbCRrce+HBuBIzICRpMdlYokAx+aW55nZ1iKgA+Jfx6B7shEy8IjQxRCgDeoWTuvt2m2B4MJ+YEBuGMvoXFaHi6vW0BSRAFbyPY20UL3r0K0bQl1PjNamuxipdKK4ztNNDNT3HRXvNlUgc+pbWPhEwHQWbXc9FS6jMbgqEIgbtWS5l5lrFfnK2dpVqbV1fmtCHotQpwj84fuRZggjYVArFisAIPZhFI2P7UbWKnA6rDAPHMWcg3t6M0kMZQPo+jxo7t7N3YOqrDPOQ6hzkYYywXAYEYpOYTchqfQUhlER5sXHW0NAjqkdcwh1oUk+8MxJ9q/fzADpZJASSnCopLvrkaICgcABUgZViO1Fa0sPePhUkynZISz+lCNEQcix1WJ10eGi4JoF56FMTvxhu63Qu6yPA+gt4NuaO3ej5+juQ/xAINxFe0zGzBjRrAKKJu4zdrHR3O5a1Yvq9OFIzmEB1Po74liaCAGJZuB1ajARTxD2gqLOYQF8+vF48Pnjd4PAuwoLDmuqVRRCrRQoNN7M39uSKz8/YVobS+O3HcqpvSMuJwWLFzYIPz0xGgsW9ooNQ7ohWB6HT0PusI7enZtfJXCHpSK4dHV/1e+VVMzBXMgxaK0mgHsXiajSC/tDi2mTY8Jb4gu3hkWG5eUMuWr4vPF56SnJymemuntPgHCHulMjCl36NiO/7Aj4Pb5xHAEGqcmG2pHIpqoYChXgZU00Qef+2oPnfA7rfZcBuJCDzRqCPlJ8GETHj9+JYVebLAItXszTlupoFz2YGAwjxmdNqnoRWFElylfcH5ykuLLx++crJjP3jHNLshwxtCHhgsID+elElg6W4a5QmCbxhfvtNC9SgGvx1RVKPGSgKQ4NCTf4MTN9vVJora/nECozct2CrGqgVY7rLXfeSxrducyCrq7h5Ar7YFl1kJYbBaolQPnDBKQw3Q6l88C1Uf6zXqodbNQMA6gbAd8s5sQ3mdAOlaCxQEE8zuxsr4X8+bWwe1zCWKfeAT2lQs/OWExvaq7OwlaoRQMzU1Ojb89p4wC7bR5Uq6PgpXlVmVEqkA+Zu0pk4RhtJx0loRVMRRVEIurmN7igN0+kdWo9a32f+kvS61SuPO+126sgiDZp2hCQag+IC5xPthkKJxsYZsmUoXSi1Ci4lHC0FAG+/ZGEe6PIh1LwFgpwG0jAl8bJ8aDS2pZcvoZr2YIo1wmQJEkR5p3gaQ109u98HqsolDu2BlFIlmQrIq/l3DXr5nseU6HGQvnN+DlDYN4bWdUPAxUkJj5wOe6PF5Lqh6sVlg2OKU39X/mk+8+7yqVF/4Jg1+hhF17EojHc3IvrDazZAIEAw7k80XBOxADIXgP3eMz+aMx8bXSoyOZBmGZY9RKASVlIuVo4sOPrT02AlMfAdbOcMruh+yW7x0uY1ipwO8emx899ZOP3ZMTZSGjgmVdPQE+8GO3H8ovuofLiorwXh
XWlB02pwNWUwn7+pLw+22gIsF4dTqjQCmUhESEwp3c4vwk5zrBWoy1y3pO+vECooNxOKxkZTPCYiPKWKsWpgG7OGFwcqaQpl3PEqoqhmOKtNNUZ9Xi1DKtaBAvcclVBYscVy2DykmexwtIVwQp858JEjJKGg37GfDbsWhRA4J+K/Kbo4ju2wlL1xwRtPtbUuNGj5SpkrddEeWFNXRVdzvcTQYM9ytIhovwNNqRjkRhKfRj2YkNcHpcgg8YP48zPp1MFrBhYxhkPmtqcokrl+P7fDgt3h0KgP2wYPoEWdUNaeuZGNqoAshEGNd0m1t4bp/bjLo6G/b158SidTpMWgWj/cS1pnhoQl1jB2QIXfo/TrLLTwMQjSnIFU2Y06IBy6hwTbRQmRECoJKKbL6ERDKPPbtjYqXnkimUC7TSy/Db+QzQPa0JE2lN6Gq1kA3bp2Veex4qRgSZLVpQh/6BDF7eGEZdyAGfV0tbm6g/R3sdPV12hwnz59bh5Y2DQoPb2OCqvhsTj5HWp2re59Hu4BTb15QxA+LJgqT+xRN5pFIFeNxWAZXyXe/sCInCH43msO3VOCzmpISbmDZK6t/58+owvc0rwr72vk2lC3zfmRJbUMg6aBeEP5XCCR7dqTR3bJ9jI3CAEdDAuNzhkIQ7C8j0JkpQnZpFOmm+xwFOXbuJwrhYBBLDFTi9RljJ7nqgOaP24PHfq9ZxZF8JNocJznnz8OKuKE6eFxfGt01bI5qVLsjqipBx7N4bR3KTIsKJBTwS8TwqRguGM5oESqUUVIxWhJqCMKkKmH6jGo3CHOcmis1WQa5SljKgRGRTMSkRyFcE+oeKklbEF5vV1fgvmS6jPsRiHhVk8xWZWJh6xRQzguroPeDEQYE/FCvAFbAhnVIkBUev/z2rKyQW1MxZdfD70vjzhhex5zUrAnO6pEb1yAAebBwNKkqFClIRBfkEBEDXsdgMp9+IwVIGwWQeVptD0PDj7wknSwpt5mLTGmKNb1qdFFDRWAGFPJH1YzvAX9KOKDDjb54GqKRet18kQjUIKpqKhC/ggNNpQiRaHCm4IzoTpXTN6Yh1sNto2bOMa17qALCYj887ijbWJnxgMKIgWTBiwaImtLV6ReiOv17eF4LjCsUKopEs+noT2L0zgvhQEsZKHhYURJmxOgmwNMnY7KfUVJU1m5WUvvtfP9fwvHTtc9Kn0sNxpIU5JiWreigVDU06aKC7iVs8/LUU8F6fTcbm2ed6UV/vFEyBxGgOv/m/SwsU3kzf27gpIorytBY37J1+qbLI4j2dHT6h1OXzy22JhCK8/3x3CXylO33rtiFRZt1uG5qbXGIkcP+pLHx2wpGk3F9W6DvceXMq5zy2z7ERODThrgDJfAWYODvlkEeTL08mocpf+1yjuHEnyj+eSsOc6+jOZ6EZX4MBFocbyR4fHPYsli1xS576iOFWBT4xb5UvPFm6WlpYxEV7WSmwGEPesnUIs7qC8PvtEivbuTMmvPMtLVqlOALzqCDQxUori7F1/X2nG5kgM4nSi2VuRCKfRDSeAYlh/CEtbml3G+H22RDNlFDIFUQQWW0WeLxkJTNiT3cclRI9ATlxFfu9drS2sj53Bc0tHswbHsSmv7yAosGP+pl1MEqYQJv2yXhHa79W8I2MJXndcyrScRUtM41on2uCxWEQkh+XJY+WUDUuPuHBGiUuU90CAbsAqzgJUuCUSxWUqbExj50SVB+QquBiFStRKccJZO5GSxfVePxIP0G8gyqsY5lsCY11VklRLNBDM1xAPp2B00q1oGolS7e19DO2obLADnOPTS5UKi7pI8vo8l73DuYRTRuwfEWLMLnRmq6Ng4oCYDIKwcu+fQkM9icw2BtFPp0SZc9prAhAkt4KwWJUsyhq+36o3ylMySB44gmt+NtL/ejpTWH2rKAohxQm4j0wGUXo53KKWJXEg5A57mgsvC9UNhsb3WhucksY4qAeoqPRkdfZJj1Mg+GMvOd0tc+dExIPCe9tKOTUgLUwiFeCp+B6PtMM23G8qdgxu2TvvoR4VMrlNOrrHP+PvTf/juO60gS/WDIzct+AxA7um0hRtCzbbVd1u6vOnJnTM/3b/KdzTv8w7unuKle5qiQv2iWKIkFiBxK5b5EZ65zvvgwgSQEgAJIiVc6woUxGRrzlxot39+9KrATjJNTy5r4xdr+N7yPNovbYDi0AtPxRiaCFbnpMKfCmKXAu5p5MAYtZE5vt17CRcN8PgH6LEcaAlTqeB52JAOL/BerVQNLpGLDmdUeodDro9UdYXEojkVAva9QeN0nmeNOXRlM3X1jXVf5vMneCixC8hb9xE2BKVbNpIx4jXndezLOMDqdPlNrM1SuFw5xvbn6TUj03CEZR68Yu1tchpTNv3GTEnSbR0fMLGXnx6/UBdnb7ku714YN5YcxkQty4udms0zfYGWFZy8qmwnTEynwGl61nePTx/wv0fwHHs2DoNjKzKcRLc0ikY9BYQlIihKLZk7lRWwfu/AcDhQrBbJRwxHHHjEDw37nTnbQNkTdToFL8m3RTohP9lLxLfNJjvzx7pZVGaHJMg1LRlnYHT1Wdm7yE39k0x0TBIZM0xURPkz9BS7a7LoxApRLGxsVnuHeS13FERoLZjSEa9hCu42HoxtBseYISyCIkH94qiubG8VOYIipidEjWQmuIr7+p4duv9+G2q5jJ+sjHKYhy8gpo6QdaetTAMZ+MpKcmSesCxx8xgclLKfRyzTCqmmhxyi2j3jni0e/t90HTcsToWZZ4bi4t9I0EVPVcJlt9te9cP/RRMx7gp3LwmdJ1trHRFpx91qDnHKJqfGrJPr8vcO1QwIoOpremMzHcf38OK8s2Pv7jDrZ3ulheYiyFUkj4HLjmKXDTtUdsAFXFj62ovYCCK587rz3umUf9TT+nFHhdFDgXc+eGM180EWu4whwi3/BFBsO90XVCdBqBMBeDFZ6O3qlzNUnG1G/R1x6gtKAjkdbg9g3M5+kXHo6ZLn3rR2xDMQD14vE8N8Uo5cggyIzkGIdKE/VUfW/fDRCQybDWexh9qjZ297pYX29jdjYtJky+xMrnHI5feAoxGm7dmsXtWyVhztyguQGRFpXZpJj7rGQMjbqtJHzxFROEBVJshJjttA5I2+NNYmYmi/fvFYDPHiPeamI0ZET2EE7bQhWriK3cRmZpETFi0E8SOIS4QrIFxejFYsKYAd3AMIyh1acuKvqIxBSc9EAOmxwzdwaWMfo9HmfAGDc8JTBx7rxWzeaoNT6SRCyQ1Dmi+yViCpp2Urnh1hk3AomRoO8yOXZflBl3MCyIhaQ7tJHXR1Ial0Mhg4wOfvdDXdIOCZ1abzmYnS/il7+qYPVSCX3bxcFBB92OLRsw66hTqhn0PbGYUMi7eWsGj7/swDRsEexk3kfLKerq5M8xfRhtvn8wkKpqjBtgcN3kuowaoBuCjImMht/JLCh8EOGPwYv0/8YTBp6stXBQH4ggynWZy1KaUQyK7yv/Ea3rqO3zfEa0bDRtsRYsLGSk/VeJjTlP/69yLedv2xSgNFxaLYigR
DdHdKg19vKHyGfNtUzkQFr5Nrc64rvnnXx+rD9BAZzrhdo842QWl7KozKYl8JOmf/ZLtxDpObm2o7FMP6cUeN0UOBdzZ+fFrI58QkPXDpDJUOq9+JBokucmwUA6anY4IWL6tB4YJMdiMztrAZI5Dat3DQy7OmZgwJL0Oqm+cmgW4HYXDZkvmvwd0wHPyxF9jv85OV/+xA2EgVXrGyNpmAyeTD2bZWpboMz73zVQKhHmNkQhb0if3CxoAubGTqme2sRwGIgJeDjyQLxvMn4KEdysTT9UfmZqpWPBg+VAaRofjOhPNrE63xuXkNXgOgNUmwdY/+4Jars/R+nBB0gQPnbCfMumJ5QUGT/n4xop1FqmwGgS3euHh2LY1Chb7ZHMgQIKXRGMU0hJQBmEmTJfnxj2dF10B4x1CKG90CTvzVkB9h0THduQcrmTdOb8ydxb/QFqzSSW5lSeeDpl4ta1AjosTvOEwpZi7tHz5bjZds8x0e5rGO3vwXSagBagPjjAP7VreLhQkfS83fVduIMBiGrI9ESSqby8it/8x6u4vJqV58G5VdeeIQ9XGPzhQvohgX54RtOQiIXYfbaH+l5D/PJLl2bxy1+tSMzHJIMnM6H5lub5jc029vZ7yGctlJkfTwyFuI5Ll/JCa97H9caKb1w3dCNR+Ov3idWgo1xiYZhzv+aH42dbw6GPzc2OCBBM3Xvu2Rxe+W594TrmO0bcgIFNy45C2rvoKPnOcU94786s1JVgGV6+o1zPdH3RNTKzkgIFw/29HogRcPWKi6tXinj0uInh0MHNG8WfBO0uSqPpfe8WBc791s8WdORjOvY6PrLZk822p05zLL0OB0AqC8QSKqDq1HuO+ZEaGRnU9uNArACX7xJABBg0gFFjC257C5X7eQlAI6ONKrWxKfXyEwqW1nFlMmNb8tu44h03TQYzKZ9qpDGriHsGRXlmKIAeB7UBVpaykr/MKl9Pn7VEgt/YaCFtKX9evcqCoiY+eDAvfXCjUEw6kDKl+/t9rD9tSlAftYA/fuIK72AaDufEKnLV6kDODfoj2eS5yTK3vNMeYHUuDsJm0m3N8/EksJoKUSk08dXmv2D/SQ6lezcFW/vUzVkLEcvk0a0VUa/vY36ROOeKLtF/eT/pRy1xb68Pd5wXTUY/6DvIxTWJcej2PIGfTSY0uNwEGVSo62BwW2JCaOIzIEZ/Me2j1af2HiAdPypJy/6YxpYyRtjY7IhptVyISeQ/gxUTMU00dhwpZYc0Hjg6dusB3G4Nlt89zGjw+w6a3QaaG0/FQkEkv0igCxgkGehYvLIisRgUYshs378/j4+7fXRqByikjq6P6HLaJ+dA5m76fYRuF71+gK2nwM1bFSwuxME+o4MM3IwZ4mu/eiUvgZlbW118+21Ngtto+v38ywNcXs1JND2LudBPXNAtYWZcR5uU0QAAIABJREFUM4wE39joo7o/wP37lQvBxVK4pKCxttYSS931a0URHF7FEhDN8U1+0vzN2A6iUFYP+rhxrSgoexJ/8godyzqM6bASlrjrKERSuOIf1w73CQpD5SKLPQXY3lGFgfjePngwh1xOgVe9whCmt04pcGYKnJu5s142zaKhc6QBn7m38YXRvk7NVWfUeXTiHA3JRqwBB5sBBh1g5ZYBRiv3uya09gjVp58jY9ZQPbDQbNUOTZN8QfkC0qRJRrS52RV4UgaERZsW/aKDHiFDbdksqZmT8XOjiMUNkdJpZmZkODUk5ibfuFYSpsOIa/rpaTolk+42O7h9JSV125/tDIVoHAMPfnAD5V+12sf+ThMlZg2kWA60KdH0rGAmQgxrjjPKWgM2nvbk5kIuhpQRoLKcEJMf243aZmygr2lIZS0sFh083d1D+vo1QXmb1N5fJDkLWcQzKbipMmqNTSwscuN63i+pNjJaJ+gC0FGtDVAoWqJJUtBpa0qz5RhY9pXocmwjbQdo1U14Q6XFTmq+HHcmEQiscYfmecOXWu/RfEirVDzEcGBje2eAbConQlG0uUbXRfOhxj7ydew1QozaDSTQU9YhKZwrxfGgExFP7OuAFqfpSC1EPms9pAZPSGGV807GwEjqu/eX8Od/7sD3eoIP/2K/Uf8nfXLdMKLeHgKpdByFPLXxF6Qnro1ABXOZZkyCt2jSrR6ksL7eGqdjMU3NlEwF4tMT753CKLV3+uoTliE48cQgGI3Bcs4brMrnTDPz3n4Xq6t5ed5SS+Ckyb3l81xjdBVub3cl0JWWlo8+XEClkpbaA+d9VsdNJ1pv0W+kEfvlCxn56in4EqiGOfMsxsOSvfOVtFgSXscYor6nn1MKnEaBczN37n9WHDDJ3MdM6rQOTvqN97JWO7X28btx0qXHnqc5vlMPUd8JMbusozCrodcBhhs+5p1NzN7oolJZQMIipCvBZlT+EYNgur0Rdna7KJcskeqpGUvFp/HmTmY6HLmi1Y9cD61OiL3dgWx0ZGjN1kB8s8822mL2/OXPFxQTYH10U0NKCr2EmJ/LotPoyfjpX2V6FqN3uQmrWu30Jis6coPIpA0U8zHEEzr6diBpUYSqjZg7x0m6URDhnUtzqgwrGQF51IuPQ9KpHB+1ThyBUZAgN+aNn3oEoTCtfrKIaicOz/WgCZa4EorI8Ngfo39ZUaw3cOAfhLh2tYhmcyAla4u5hMyT5mMyfzKVyQJbjBamX56afLSGZFQUBlIBql0dLdtAOU3E/qN5UejKWR463S72DxJYXTpK25A+xjySjN3xDWHsdqelGLvw7QlHvBBBU2ldPyAIL5YbDn/hOGmtoT/V1OhKOfzpXF94H+MmAs1ErpQVBn0cc2ejXLfR7LkGyCBoYmdQXSGfQIxQt6xDn0+MrwVSNJvzXh+YKacEyvbR9w3cvllWFQpfkCN4P9cerQXHzYm/ZbMq157MK7r2XJP+kS6m0N7ujqTaHs3jH9yvCNQvhfbj5vY6hqXafX4xiH8+FcfN6+PSwuMYCPU8X0ev0zamFHg5Bc7P3EXbHOOHvLz9068YZ0sJcz/9yud+5SbLmu+7awGyJU3w6O0BsP5ZiGLzMWYWP8X7dy3MzWdV6s7E3TSxUrJnpDvTYlhxjdH1YwVOruQmUa32ZH+nKTKfs+A6+8LQCQfKgh6M3GbkejptysZBeEo1D01yYh9/V0U6aWCmaIrvU3xz41Qp7tl80ckkybBoRhWT3hg1j/2TkZNJ8Ts3dt4T0YkCCg/eq76oaxTjPtqoCaqzvefjUecOsh9eR4KY7JMTVXf/4L9aGMDIz2CvU0a1WsPq5bL4FilU0FpBiwbTi3gsL+ZQLDF1CELTfMbAynxS0n0iRjbZgR9q6DsmdHjKTM0VOAH2wsh5ln+t9wxYxORPHEktbI/Vu8yRI/nrLCVLs3zM1BCz4ui3TKQsF25gYL8F9FptJILO4XOZHMfLvoeaiXg89lxgHvt/1Q2adGIuu5GwMLeQE+YtWQQvGRAfWwAVMEfGLjEbE6mX0e1HqHEhMpmYuIq+e1THs/U2bt+aGUfpR1er+bCiHn3GXE8UvCJGz/nSCsAgOhaSWVzI
iN/9NPS+o5Z/xG9UOBIm9nZ7+NOnexI49+GDObE0cM2+jYN0ZArr9JhS4G1R4ELMXQqXjLWu5/WbNz8NMj1qafvPAtGk5i8rTXLvGTDydNgJE74RIpPWpZALmWp0KOYYiuau6Tosi1oOYWjpV4+uUpqMpK2IL57FXgxJR+JGR3N+HKaY0QlGMVfJCOPjRsh76vUhDvY7qBQ0lPJMf9NFU6cPjuA1z5614DiucOtO1xWfPcfFSHiCZ1DLYDssYiLaqDES5qQYPW/TJE+e9xBjncyfB58JsfkZnMcUPwZTDW0X680StMv3UFzJACBE7NE8T/pG/7OVy6CTuYbP1g4A1IVhNlpDqWTG8TFQiOVjmRoowoimYHzZvBc8LyyxH+mWKGypOGLxIjzXRdu2kXBHSMZCQczj2PhHhu56QHugI2GGgvgXjZvtsL5Br2djez+BpTkLScvA6lIaa6MR+k6AVk9Ht9lE3GdwI/s+3yoVIUnTJTiKc4uYHaU9ib+gcPUKPIOM2kwnMTOTlvGdhwdwfXBdnOWgAEAAGqZy1mv2oXDI6G3Oi66G9c0OaNZn/ER5Ji0VDSXOQ70sIgwQZ17QG50X1P6zDOINXyPrX4MIm18/rEkVwgf3K2LhYLDp9JhS4K+VAhdi7sq0/BZIxqAVUzF2muQv39VhpXXsPdPQaQa48oCR8it4+N0jXN2u4tr1IzCKaLTcWLnpkfExiI17WKSNkQXQj0lMbUr8cl7M4CrFJSaoZyp4hpsdNWjCg0aaFwUPMkB35GJpPi6MQGnsKkqc/uxBp4s6hmIV6A98YeJuv49kUhfI0o29EfJpU7R+Sv/DEYP+FNfjWD0f6Nk+EnFNYFg5ZqZ2BZqOcjmJke2gSD93gjEBA7iFe5i5sSAY+MGZsxHIDgNYy1ex9rCFZ7/7PW5dMrF8qYzlpRzyWboDaD1QwUSkKd0khNQkLU5ipRI4xzShiiVMZK82wn61h+bARspzJKiOz8mA8sl3h4BNyM5kZJxWwg0D0/pDG/V6HHMzSaQonJkavEBHo6XB7nZgeh3oGou+vmiKj1bC+T8ZPko3j27GFXM/g6AkPDKSTGhtCVm/nuVyLeSyR26F84/m5XdIt+OiRywty/VOxD4CNNEvzGhvWq0WF9Jizq41WMmN7iHWa1cBHsziIL5DqZhQVipxFby87x/jCtKWboKdna6UyiVK4s3rZRW4xijQMzyfH2Oc0z6mFHgbFLgQc49Bk8hzvjtq8/pxhk7tulUNJYiucslAftZAr6Nj5/s+EsEutLUDzOoDVIcDPFoHVld9GDHzOU0n2hC6PQfbu13RqiPmPOahotUQe5oMQzR4Yoqz4Ay5lkxazVu0hvHUIzrQXN/te9jYZTQ12QGZnQrMoh99ed7CQiUhzbQ7CvBlfsZCMW9Iqpim2ZifSaCQM1UkP/l6oBgCb+JYF2aVOZr+X+Ki90cJXLtZwdx8Bt9/X8fBXhspS8e+XYJ+4yqsXEKgc8/zlBhoZjJ48MoH+PabAIXmI/z612mk0gmJL4iCD9km4wwYENjpDJCJov5O2FlJD86B5nTSgrnqu9WhuEGccVrgaOBADz14gYa2bSAVJzNSWj37I63TVoiB4+HZdl/m2up42Gv48PsjWF4b5mtm7BHtGHPB2vKj4cRiiH485pOCpNK01cKhf912TcwXUpLSFq29Y259LacooLKsbLs9wid/2hHB9oNxCdfhiLnX1OwtEWZbraEIp0RWpAWIPna+J/TdX77EAEZD4gVey8BesZHI9//wUV3S9G7fLEnQH5uNAttesYvp7VMK/KQpcCHmbhmA5lJrI9M72nTPS4nzCNbsx7GBvbGffe6yieEwxN7Xm5jtfIGPbndwZUGHZfr4//omvt1J4V51hKtXYhiOjrTzSIliTe1bN1gsYvzbOIWMGwPTaKo1Q6qbiZbDwg8Oo7cJMypKt3CYSebOdnkvtaBU8hK4URK9jmb7ZMqU38h4g3Co/O3UjcXKqYK6lAig2qa2TobAjVn6kypk6jcCmViMPgdQrY1Q74V4/8GizIXBboyqbjfzWHvSxqZ1E7GFy6AP/SJMRMzz6QS0yof4csfDL3bXcf0qYwyOtGERagDs7HQQuA4KMwllaXhhMQgrpB/SVwAzZHqkGSPpLy0nMVOiBYTV2TzsbdVg6T6yyQDNvoHWwBBkOE6a1/CPleS0YIBBY4idHtMMdWieD8Prw6T74TVq7JNTocma2ns/UOb6k8wUnC/T/3phFvky0zF1idOwB0MkU7qA51BgfNX0rMmxHfeda4j+9Bs3isjl49je7sm6lOpzuYRo7hTUOL6lpSzm5zOy1hn0yXvlN9HilcXruD7exjl5b0NCPHsSkc4AOqH5c8ANb2Nk0z6nFHg3KHAh5r5cMZHd0lGr+yhVjjb6c01JTGoqT52bdcQkjmtDAsoCYOeJD90EFm8mMep14ax9iTvaM3z4nzxcWk2DmOyeGyD2nYeqtojff7EFQ29hablwqIlE7RNZigFHh8x9/AM3NTJJ+us2N9p4/LiBVNrEfnUgyHN0SajBqkFTuyfbZfAaD0YWE8OaAUz0j0cpRDTPs3hF52B0GBzHO8nkWEpWar5Lkxoc7/noXtJHXOvEgyeeNUK0Oi7aAw03by/g5o0Z+ETM80Kpd7637eHbzjJw/QMkUnGExNC84MG0OV33kEk6wohFoPFJAsXcKGzQUjvojaCHBPNQADC0NhweDAw0KKyMGbOknalfOX8+AwbiRXSn2d0yQvGtB6GPRt9EbxgKs2ezkcCTMFkb3ocX19F3ubl70KHqch/2/Rq/kNkRVjeTsVALWSDGU8/l2D5CeabZYg6//k83UMglJNtCyoiyGEuOue3PP+djm3kNJzluWqAur+bFz//5F3v486d7uHd3VpDviF9PDZ2CC/HjmSNPYZBuppjxauAvr2H4xzbBOemajgcfzEkWAGtEBO8xQ0WlnB170/TklAJ/RRS4EHOfKel4fz6O/7UzhFMIxf+rtNCzU46MjTjedI3xXvrSj7XkCu/UUNsKYfcNrNylFmzDf/wpPrAe4sOPirBSWQnAGrksaaphteSibrewsT3A/7O9jVs3O4dpMWSUZNBKK1GMhd+jIyAEa6CJNkDY0VI5Kf7HZLIvQXXK1M5UI6VCSoqd3K4KR3AS9FMy95tD54ZJn/l4GhKRHAkIBCphPnz9oCeBd8vLGZgxDY6jNHa5aSz4kPlX6y5sRyGShXoM771fwfVrJREs6CENPBdfPmziD09KGF7+G+QrZdbAjaZ2oU8aF0znAEu5bUH5soeKUXCejDmg9slcatbEDlwXsfgQ2TTVajJ/1SXT3ohQJ2QWK8Q4BXA8Il5H4UfmMI4A5zmujUwiFGGn2ad2rpDsKOioR6ayBGbzIfK+h3VPx6BvSZW2sQhwoTlHNx1n7aCgSbx3ETBIg+PK2k7Mi5UEaWWghSJmQmIN+DM14og+UX9v8pNrnAIrUzLvvTcrqGlffl2TOA9aiYif/vRpS0oh332vIgIr538cDd7kOM/aNgVCjo1CNwUTCtc
UFHmo387a0vS6KQX+fVLgQsydpLg0ayB/oIEMNZE4H3HUxqgKxtDUzl2bzG/MC55rjPnRrVqIvXVi0AdwanvQ61/hZ7M13L9ZRtxKSBR6dC+j4B/csnBprg37jomn6yV8/bABx/Px0YfzoqmwFjeZA7UZqVL2XI/jcWia1IBfWclJ0QmirxGXXCwDMR0HdRvN1hDfPazJJk0Gzs2Fmz+tAr0uNfQQRKlj+h2ZMzHj/VEgQXJExbMdatsODK8Lz6Mml0bcNNB3j6J8uVGRNrvVETzdwu17szJu5tITCYu+cddx0e26ePRsgM/3ZuBd/hXySyVorIgWEeaFOZ7tn+yZDLaNmNPF2hpNzV0Q/56R+vTX0kT9xVdVwQmYLcQFmW4wdESA4gNlahYFIQ5jNI62poB01oPjL6ZIDx3NnkppSMWYI390aAySDGhB0iSwkPecvYejdg6/0VISI3hM/FjNnP5ox9NgI5AgQAo4UdR2lNan2tIQi8eE20i1PEorb/kgCA0Bb1g3noyRgaPU1ll/vtMeCmDOkfD0lgd7SvekN4Na//LpnryTv/jFAiqzqcO1dsqt05+mFPiroMCFmTtdW5Emdm5KcaenyS+uodtUAUcsw/niodE8boeofvUMVnMDS5aDhUwbN274WFrIwIjFDis8RfdSmme6GtHEuAHQLH7/XhnNlo1vHzbAAi/u0EG+mBK/eIkMkn52pQpGzQhz4Cn60amJLy9lJd2GQUjlUhL1pir6wtxgboasjc5qXYW8JdChjE7eXNvH5qMNZar2gVZPpXUd7CnmM3QJ5OOhnALsgJqdAY8AJ9T0aeYnlqzGcq8jjEITHz5YEDQ8Mksx4yPE3p6NP3/j4MCbwTB3D8Z7K8gVslLi7XVoXWzDjOlYnsvh5o0YHn7fxvZWG5cu5aQACl0BLJZjxUKkTVdlGrhH5WF4P+nIMe/VHNjDseByRvVKlgrLcKaZcaBhq+pDD90XmC7zswHNsZEKe2PGfsyCOny6L/lC6048Lghy1MwjOnKNcE0tr+TR7lxGba+NbrcL3XVgaL7g38eJQEeJlFHxnoZ0JiHIha8kY71kuOf9WQmhmpQOjhmsDzDEp5/XxMq0tJiVxfmcW+W8HfxI1zcaNtKpOG7dKok5nu/qu0TnH4kM026mFDiWAhdm7jSli2X62GZffpJ7u5XV4G8GUvjFSv9wM6bxlhtRLOygiKe4m3fx/r08MsWCBMlNRmxP9sixMfKaOjiD4CqVlJi/GQFv94dYmTHgeDb+7d82cOvOHFZXcgpoZpzKxTvJYLnBUQskOmgyFUMub6FSTuHa9aIwYUYPX79eEi2VMLQEJ1mYS0tFKHvgYP/JEHFvgDhLT3oa9MCEZYZIMMANGtxAhxsa8umFhjimWWSl22cKXIhMEqg1XVSbIe7/bF4EFuYoi2bKHHzdx3Y1xNfOe8jdeQ/JdEIFOLKw/WvZ5YghABh+H5VyDLOVNLb3huKmuPdeRcy8nAeDDb/8bCBR9NToIzTA6JnwyVIzJNfdO6Cb4MhNEV3Dz5MUejUVClEBwlFPsNkp90we/KehEUiIV7/w4+SFZ/zOsYiF4YWmyOBnZlL42/94RVLJagc91A76aNR76NRb6PV60EJWtgth+wkMnVCw90mXd+lQ6zpEZ+CA5nkKqPfuzYogEpm336XxTo4lkgsdx5PYBcI9UwDjnKbHlAJTCigKXJi5y3s0NhlfiJhEv0oAZkKD3QEyhfGePPGC0mSeSGmY/+guWmuz+JftNdTsXXxwu425uexhUNfL+mcg28Nv99Ft9rBSSYCQrmQwzLP++vNN7G7lkMklBIO6MpMSPzKLoZgGkeO4KY+LQ4z939xIuPHzj0yL2pykzRmquh2LzZhSt1tDwjAEQS1k/XhPRyoRwPMMtLo+HI9oaj5GdgA/JHPcF8ZJDX8wTGNlMQPXN3H9ZlmwqrnpTm5gvufDCXUkZ0vIFNOsoStWiJfR4zy/06Cuc74B0GgO0es7WF3OwTANWOPa5M2msnzQMC5aughWz/eimKWKd6CvnsFzz9nWacwhhxG6jtlztBb4yaA8BjvqpK0P4xgmPhYZnu/4Qv9ihyf7m/kM+NwLeWLDl3D9ehn2yEN1v4/9/Q7ajZ6YuOeLWZgWszU8yRG/0FDe4E2cB3PEGdh3n4FpqZgqj/wG+3wdTXPcXE/E5q83hmKej8fpG4wWzOvoZdrGlAI/bQpcmLkTQcylVndBhYQvKBlnOqdhOKBfVpNIeJ5/7ghDWNkYKh9cQvtgEf/y9VNs/MMX+D//to3llYKYaiOzaXTfZBuM+B3YxCgPcGWZwCFx0bDJmOdnExgMfBzUGlIytDSbw8pyVjT9Wn2oUt9kPEeTlG1//B/2S01O8tAFwlYFSfGc/KmYO9lzCD7DwDIytU4faDZ7MAPWPtcO47sbLWWSJxiL565gYXUWV6/mQFMpc8knNSrGCwxaI7RtE4mZLEKXqHev26c7Ro5LFdDobSLVtMVNQbAQWk04R2rKQv8jEkWP4blPkozDa7SHGD0aiRCTGMO7Rs+LzL3ecqVdRtdHB+9lEiJLyXpmFo7rIYPOa9HQoz4OPyVGIIDvnB7wFj1j3sdxJ2K6uEyuXimIH7s/cMU0/4jpj++Ar/1wfuMvfPdobarVB2LSJiY940l+SgcRJlnYhuZ4FaL5Uxr9dKxTCrxZClyYuWdTAC3p9lHs17lGyg2dwWes5NZtBGL+FXPbMa1wc9Q0B6V5HbHUbax/HMM//vFf8F+zPeSLWYkuj24j0ybWONsiM+EfU3uIEz4cDZHNkFWolCyOIZMxBVAl1H3J8yUy18OHDXz3fQPElWcwHMf54sFW6PN7+LAufnEKCcxtZ54wD/qpozQxap0cj+NraPZVYFlccxDTHPVD1LjOVkMwjTzs7eO7v4Ro1yqYKd+AZVlSd5yXsi2aqPf3bewP5hDPF9+M1iLD0WDlDNgM2muPJFLcskxFwGjcZ/xknEa73sVuoyrgQrSM8HlFChetBE4QQ2XGgm6Nq+SM2+azYk33UiGO/aCMoRPACtsqeINtnNUUTz/DidczlgHwEgUU5xZQnmHchkLeO22KZPQ8+Emhh8yesRysw85a4mKlOK2Bt/Abx8iANDLG2ZmUjF3N4i0M5gJd8rnwT+pCiCXtAo1Mb5lS4N8xBS7M3PNZHTlThx0w7/piB/fYWFylQXluKCb6k1ri5u67AbJZB8u/uoSNz/v4H3/4E25eHkA3yWwUj6BlNyqCkWFJzUJC8LXNuAm7RUmEjOBoxNyQ2bZtE9c9xK2bZSwuZMWM2u97YB3t1dWcRKiLaXi8ofT7zAvWJYWL6UVampoQpQCVXsdAKqLjBZK6xo1fR7vjwB3aMDVfAa3QrDB5aAokx9A8+MM2ttdCNHvA3/7NMmbKSTgu9RPmWgNrT5r4ZCMDu3IPaU46vKCUNdn/5HcKJDpw8CyA/aiJ4n0VqOiNxoLTEQkn73rpdwbDxbwudILAkFoT7YglRE8DIfEHjMNANjbKyzjNhbwL17fQqOdhBK5YTQhaw99edr
ANL4yJxMdnoHh8JBjQXaLBqizjw1+8hw/uz0nVwJPiOk7qi/Mh0zk46OObb2vI52ktYtW2k+54e+cpk9CVobDk3944ztMznxmtDqTz7m5fxp60TBFOztPO9NopBf69U+AF7nL26RJaXPM14SmySZ791sMrudnGLSCe0NDvqA381LbGDD6TDVG6fwtfd6/jz5934buOMF9u8NREiAy3vtHGk7WGMvHSbJqIiZ+b0drcfCcPRrz3Bx62xf8IiXj/8GfzEmn/9FkLBMhgbWhqOzSH0x/PnGEyfQKBMKhudSWPTDYu5kFeNxoF6Ni6QKj2h8BBy4fb78Dy6oixoEn4IrBMCEdLo68VYXsxDFCCufxr+OUP8b8+8fHllzWp48lo7OpeG//0rYn90q+QWlyBRsY+wSQn53bqd/q3dcYJqHxxmsL5p7GUqGHKpzMMYXdiGI1YSIUV9AhNSuX3DNz0hc4pmvC+yOfOTZqxCtEfactzEmsv16l4BtIzwgboDQ14dl9Q99LzVzB37Tr8WFEAfE4lAkF+zAySc6tIzizBMQsYBgm4vobA80RwHGg5pCtLKJUzqDds7FUHAkTEwEmO6yxTjq7pdB2k03Fcv6qsPy+6jl4gzY/+T6bz0dXD96XRGql4hx99FOfrkOuAi48Wh0ePGtja6uDySl6sSd476Po43+ymV08p8HopcGHNneXR5/M6NoehFDNhsY/z8hcyCQLZJJgK1mO+1NkYRuD5SGZjyL9/F+HaHhZmHVQW8xIMRObBP7749Ccy6p1pWNdvlEWbYuBThnCwgvNGPT4EGeZcOQHTMkQYIAPjLzTLEwv8T3/ew85eF8TcZm55xNi4kStfn9LWGTX+dL2Fg1pf8uAz+TSG7RFiuTyKxEJvt6DLJvQi2VnfOwZbz2IQzsCc/3uU7t5GeWkVMJP4frOK6md/wf+dbiOWSOJ/fJ5Gq/IAuUurILTLhdKWRDPXhDach+cCrsD0agg9G7rbguvHMbKLaJuX8HBzHaVcQ/Lqae2QjfYca5Frw/XoHz2ivLKiTDZC9dZAoxOAYDmsBc/qdhwfC+lRYBraA2jDFqwgQEzP4sbNOcRiJp49dBD3ulJBTkwOk80igOObSM3O4v33lyRHn8VQ2u0Buh0bw8FINOvFUh6LMya6BzU8rg1Bv/nCQg7XblSwsJhFUgoHERr49JUeCT8piy4f/TnrxHPDeov/8H1fCgCVShaerLWQy8QljVGlWL7FgR3TNZ8/BSxay779roZWayRpoyzZPL+QeSfpe8w0pqemFPhRKfAilzlz58xBv7pg4uMvRnCKQIqVu85rGaYJU0qVahj0FNAJX+SXSgncWwMfmUoOrd0VbFYfozRDwBYDIcO6GT4nIDWGaHzUmpguQzN9u2OLWZtMWVU2U+ZeIuSxmEl0kIExipj+ZYJ85PMJsC72zl5Pqq+R8XCcYkr2idpm4OrlglgN2P/l1Th814eTt/DRL5bFfP/fR0PsPXqIhOaJP171pRgFte/QC6HPP8Dsb/8LylcK0MFcesCavYHdj4f4/Jt/RaXcxYH2AVILl2DqrBl9fnsvze2abqC1WcNw/Sv46avo+0uCPeAN2ph3P8VqZh2Pm5dgLPwWS/ezaG/PYv9gH/E4QXiIdHb2paOeqQbP9eEz8E+OI1qPTyihSdPhDfoYdG2EugGDdcbHML2B7yMGBzHdA51BrYNVj8UGAAAgAElEQVQ67EEZt24W4YeXsb+5j1G/jZg2kmes8OUDiKyWmcXlK7OolExJFyzlMnD8jKQccj5katkUsxlUWdehDeyuNzFqVFHbrqK4UMHVazOHLhuujZeZ2snkuY7exYNjo6WEa5ZQtEQZvHO7LPEjL33/fsQJce3QysCKds/WWxLDcOVSAeWZlNRRoOWBc5keUwpMKfA8Bc6+Qz9/n/wrl9aRCnT4hH1NHnPBS06pDQaIJzW0a4w6B7SJKOnTbidT1fm/7Bx2G09we+QibrECHCFBFbxnt++I+ZxSPzXwbN7CxlYL3b6NuKkf+tHJpTtdD3MJBu4d9crxMWWcMLQ0vzPwaO1pE19+VRUtlIU2yChpGeB9zH/mH1PhnjxtIpYw8bMHC5irpMS8++vf3sb/7Ntob64hblCDpX+AYgaZSwA/NY/CvY9QuZ4FWGCGEfi+L6lf2WvX8f3Xu9DdP2EmX4Md2AjD1NFgz/hNXBKajv11D90vnuCK+SW223FoS5cxu+LB+/5b/GZpC5fnA+z/cw9OPMDMlQyqm0n0bSCd1tHvOVIWNDI1v9a9NfRhaiEShqMEJ1fp97JQx+4U0k1jzrs7FHCi+YqFB/dmsDeXwdO1Opq7VXhuB3E9gE/bhlXGleuLWF1Kyjpwxoi8zPTIJLVxJTsVAECGTTGzWEigOJOD3WlDG7XQeNYT4WFmcUa0/+XlLHQKV+8o8z7LcqBAQyhdCr60qnDmXP6v9XmeZSAnXMOxUAB5tt7BkydNqV535/YMomwNou1NjykFphQ4ngKvxNzTcR2LaQPrLrXmCa54fF8nniXDIWNnvXGpMnfilT/8wRw70J/rnyVB0zE4Ix9rT1t47/aMmHiJVre51UY5zUIoBkYOncf8P4PUlMbGje1oJup30c6DQKLpc9TgH9alulY+p0z0NNOrawjFa2Bnp4f1jY6Y9Wdnk6Lp0ox943oJ9t/dw+9/52HQqMNMJOEGBjTfgavnYF79NYziLBpbfYTOEDFLR7JgQQsC5CsJ2KObePRVA4gXkCahXkwU/yF5njtDKwmL2VSfAduPh7iR6+Oja0m42y42DR1eq4ZbhQ3cvZFEMlPEjSUbB8/+jFotiSvZNdy8npZ64PRHi2CjqVK4ZHA086Ziynf/XKfjnGTlLnnul2P/ETl3hM0wK+DYq8YPKXAE5tdxQ6QtYGUhiXJxEesbOWyuV9FtNJBIZXD91gpuXMkgZio0xKhJ4cvHmNjp5kgmdNy6kcezDRO9VhvZ+AjJsIPGeh9/aPTw4BdXcfNmSdbry8z0UX/v2icFUrqSuOCZ487Pd0kLlrTRzkje2cXFDG7fLIvgIWN+14g5Hc+UAu8YBV6JuScshTH/eN+Fnw8FuOW8mwOvJ38mr+ImyVrxZz+o7QfiC2XAU6RzkNnMziQxvJwXcyMjnhlB32raCN0RkokYkpaBeFzpKNT0aZol3rvsHscMgeMkDC3zgVmJqlxOYu1ZC+vrLVy6xKpzqiRsozEU8z219cWFjKC2cT4cE83oLNoRBg/w7FkD166VoBsm3JGLjR0PT1st4NE/oD9woHltDOJJOLfuo3jzKnT4SC3NY3fvf5Mo/HyMpVfPrrmQxp7jY/eJi37XwvL1EMuBJ0V/0lkHnu2ivbOJK78MkcpY0Mw4FsoN3D74I1bmC7h2JYlKpYj1zS6ePG7iL5/uwkqYEjTGwh1d1qZPKsz3SXM14xmYVaDrjEZXsLlnNM6cugwoLOiBi1aji4FdRsqKSRpaMqHh9o0cZisWvntcQBDquLqaRiIOHHoETm1Z/cg5ZFI6bl7N4rsnIbrNBspZIG+E6Pdr+MvHP
nzvGu7cmUE8rjAIRIt/Xjo8Q09v8xKiC3ogGJKVIMrb2xzLD/tm4OXufl/w7q9dLYhF5acqSP1wdtMzUwq8WQq8EnMnH14oGQieMEqdaHOH/PX8oz6Gob68Ecl7gzNgvyrIK3r5qSlblqHy1Mm0NUYG+0gnFXocrzu0qI6rjBF1jGZA+lOPOyKccUZ3X7taFFP847WWYImzPjajeB8+akhe/Y0bJWlCNnw1PGHwZHTz82mEWjiG+4zJ+ZW9Hlr/7Qnq+y4Wyjnk8kNs7IVobS8hvnAViRRT9jQgkUMyG8CgFvpiwP1xg6aSy/lpMfS2nmD01aeYefBLFBZKyB04GIcOSFlX0+kimzRhJmKCRtdsO/jZ/SLevzcrUeVkzsuLOamh/cnHW0jFVelSFg9i9bPuAOhH2PGHc2ZRF5Wn6LoEJCpi5NowgsE43/yEQb/0tCZws+6gi2cbXbheGsWcCbWgQ7CITfJuEevbNvYOhlhZsESIPOHRHtsbq6VxHUl8BVmLMG4NaSuEPWzi048fodMd4vadimi+ElXPeIZxupbENlxoXR87nNd+kgIfTdv0W1P4fRcPWlFYc4Ew0tG79C6OczqmKQXeNQq8GnMHkE/pqCQNtIYBrHNWh3slYhC4Qg8RKxWxWctj7ekO7tyJSZoVNwFuxNycuXkxYn408tBo2oKoRnPfpJLC73Q5dptDHNQGovVzMyGzZ5AdN3hh+r7aCH3hiAHm5jIY2L5o79lsHAc1W4SMe3fVZk9Nn3s7rQrVgwGePWtjfi6DkUPsd47JF8s6+yfoyfv3Z/AP/7iFVq8rhW18MuXqPuzPPkFsPgbNHwG1WcTKl8fBCZOzOIWaugF3YCPd38S11Q3sjYroNe4JYh5LscZ0HcmUg/lKiJgUwtFwcDCQqmFJK4bPPj8Qgcd1GJwWIl9IgvPNxhyUCzpoFjfK9GdzWkdj4tzJ14m5z+eQiOvIZk18/ZcWgmH/1cFdpNyqhnq1DcmguJ5HNqOEJdI+belYmbeEwW/tDeU7BTSuh7McvHYwCjAaurBivCNaNxqSiRCa08Z3nz/G5rMDVOaymJnJIJNLIZ2NS8VArsN43JS1w/lP0uYs/b/payiEEARGnt87xtw5NgodRKDj+8e1dLan9qapNm1/SoGfBgVemblzA13IGaj2fAQ59RL+aFMPfVhpC+25B/j99wOYRgPXqTFTJRlHAw9HrpjJqfUy9z0d86GVn98saOKNx3Ss7/bw8R+3JYKYZVu5GdckJcrBd9/V1Ubo+OKnHAwcpFJx/Pxn85Iv/Mkfd0AM+0srOWxud8XUScbGHHIi5BGt7Pf/tInZ2RSuXytI0J0odWTgUipWE5N9t+PgyVpTyqq+f0tDMVdDOtMSbRC+i4+rG2gNMwiCRYokZyJ1qBnwBx0sxvbxn/92EZ9+X8W/Pf0WzRJgVuJIhB689j7KOVvM16ytzjGz2hstEd39XcTgIWA7gQYrnQTj1nOrKTHNm4YqhHPcYLhJyyHPQ0NvMAaPOe7i85wLAzhaBulcFqUcAyE9QZebbELmkTawumjh6ZYNXR9iaT4pIYxnYbQEC+LBHGrt0MyjzjEmgEV+4oENtzfATruK9Yc6zLiFVDaFwTBEIpkQV9Xyah7ZdEwyKlR099tnU1zz9F3X6rZkhGTSMWWZUNN76//l+CgQEU+CWS5k8KxAOD2mFJhS4GwUeGXmbsSBclqH3lVBTiJhn/cd5PXnvYfzk3s8ZBfn0fb/Bv/w8Pdw/Tpu3SxB00xhULduzojWzs3h2rUZbD6ri389nTQO++RGX8iYmCuarLAq2ibTtgiGU6vZAlZDZY811Km9rT2pC8Jcv5PAo8cJMTtvb3WQjgfoNQO0ak2hPnlDo+PCdg1cvVrAnVtlAUZhnu6VywXZsCIzMbU8YmXfvFESbe9nD+ZE01VVzpSpN/Rd7HYGaHu2BMYJ4zwT3TRogYtYaCOZLuDB7QDbe8/w6FsXswSt6VWRrXaweklDMh2H4wQoFi38+ldLYEnQtfYOkjrD1glgoyEIRug4MaxvhbASBrJphQ/wsiWnBbxXBSmedG2oMRJeTAAnXSKBhA6SiGeKmCvpUm51NC448+JNdL/ksiYW5xLY3nOQTrko5emff/HKo3+TrhznXt1Fu+uha2swXB1Zi8FnCh2NV5P09AsndGIl0FrEin09+N0eDNZ772n40z/X8f1MEbPzBVy+UsbCQgYmXUT+203hojWp3XYEQplrjvn4kUvriBJv7xvfSb6zuqYrph4JiW9vSNOepxT4SVHglZk7ZzufN2BtaRgNgaQVMd2z0YEbKTdJbqZjhftsN46vItPVNRfF1Qo68d/i98/+iKG7j3u3ckhZlpRzZds0sVfmXNQOuujbDsjcpe8xH6EvPMHAKEMTVLF02sTuXl+0GQoLzHfn9dR09nebmM3G0BsEePr9now7bvi4spKSdiO0LG5OjNDePvAFwY7lYP/0lz1JIyOiHRnDpAZJ/6LMh5saXaDCBBT3ZlsIdIQaLQpM7dMkkvwsvJ19+K6OwDeEqWSzFlbKdaw93IbjzuPuDQM3Voe4zNz6sW+Tc+WGT2hSjpNIdsz7o8BCZLOC4aHRa2N9y8B7N7JSAOes5u7THzCtLidxXs42xCi0YKTLmJuJIRlzYTtASLS7SNWe6IB3kGmVC3EpErRXHUncRSJGK8Tx1OPcaWHY2R8il43h0koKuzsuWv0+MkmVWcEu1Gj4TVIuJE+fPIg0SogZP4Tj2hge2FirVrH+OIvrd1Zw996crIGo+M7EcH+0r6QV6cL1ls+xSuK7d3CdcQ0yAobrP3pf372RTkc0pcC7RwHuQ698LJYNFDUD9V2VViMOsjO2Ss3VGYXQCUM6ASJzxtvlMjJF5tHlFmah3fktPqldwf/8tx7WnzURuISNVUFdwjjJAFjNTG3JsmkItOm4HjTN6PyR1/KP5kFqOZObC8dMRlbMm7i0EMdswQCjtBlARX8+N07503WUCgmUciZ6XUdKVDIPnuA5bJP9csOK/shs7KEnpnyDGst4843GItoMy8zGz77RRZaUwNehmwkxSVPYYV5zLqNjpmhJWtut22VBAWNf0cHrXmTY/JnkprCUTXjotPro9DyhcXTfm/lUjGgUpmCmy1iYjYsmrWhDYUjR9Li+1ZxCzM0m5Dnu7DsyL66Lkw4GDzLIbGE2gasrSVy5XESYLKBpJ0ST57MaL5Vjm5AlGapgvExaQ87yMOq08Id//B6/+9332NjsHK6tYxt4Aye5zsgwKciyDC2FV6bAEeFPWcHeQKcXbJLPTKwiRAX0lXn+gk1Nb5tS4K+SAq9Fc9+q+mixqEtJ+brPulFwc/WcEL1mKKVfyXcnmct5nohowL6LZD4F59Z/wHebS9j5fg13mwdYmQkxt5gTQJjAcZFMh9g7sDEYasjnTAyHHmbLCYEl7Xue+PqQpEkQwmzX19uIxVU0MU3qrsMSsgpvnMh03OjpxWdmmmI2auRkjNQ8qL03Gn3xredycfH9s4JcqZgUZsOrKUTw4EbG1CQvCMQvf0QP
lTCumyHMGEFz1PWqp9P/yyvJbFicR6qWhSEWF7OiEdESQYHmvTszP0itU3NRTFWEhIlu2B6104Q/ws7+AJl0DoQkjtwME5cefj0c8YQAcfjjqV8oZGhwDWYK5DBTMJCxPKH3WQVJjouWmcVKQgLsqnUHczM/TO8gWXktNXcGU4os6IdybT4bQ72ZRq3eR7PbR0JzkbSY4sjBR8F2z09EphpqcAIdTmggHXfR2dnCv/5jF9duL+PO3TlJ5TxvgZrne3n5vyhMUljc2etL9cJOZ4Rez5VMCAaLvun+Xz7C465giWQdX39zAI73o58vgLEw7+ZYjxv/9NyUAm+PAq/M3JudAL/7eohBMUC5rJ9sUT1mjtR0nSEw7AOVFYVYcqJF9pj7f3CK2rbnIW7FELt1E8PeEv68/xQPHz/Fe80WipYrEfPtQQr1XhKdegfJWB/Mzv7NgxhmijH0911sbnZQuDerNhIvQL0xFFAcMjRG0xtaANM0BdNdYfcouNsjRqxGxo1d7ftkkITfU+Vnual++XVNasvT7C1MFCFWl7MCq8la8vT1E/b2UKWirzcIxB8eiMn+bBwyuoqMlUII/8wwxJXLeYlNYMlaweDn5IRTqjsoPJAZsEjO8WxLBQsmYz6aLOOaTeDSoqVo8oMHo06wC0Y/B54LI1Rlfp+/lOVuFdrf0fkAAQx4sbyU950raKALRLBnJqweR9ef/I1+9hxjK2bjqNYciRfIZ4+vKDYY+gJkQ8FHSBMyvkDDyqKFmVIcuwdJNOsDNO2hBBtmLCIJHj6tHwyCAgMFhVIqhGUG6PZq+OyTvgS1/fyjZbGE0FLyJg4ydj7Pza0OKKhmMjGJR7l2pQhiy5+U+vkmxnLWNmkpY8Df7k4XTMm8conuIQUWddY2ptdNKfDXTIFXY+4h8GjLQ9XwUZ4dO8/PSU0WKyF3kxz5c9570uVkgggdJNMWgqvvY9i5hM9qj5FsbsGem0VQuYXADuH/6X9ib3eAVIbwsDosS0fccLG+0cTlyyxEoyK7r1zJq/KfgdLk1jpdYXzUeLidH2qkLwyIwDy9voeDhofLRZYxVbEFyWQMldmUAOmwD7aws9vFzq4u/RISlOh6kwfN4NWqja1mBl42r+BXJy847TvR9+IBYqYL04yroLgwlFQjugYIl3vcIWZ50vIkU4yY50MYoYeRzch9cltKWD9sTeZO64HAnGoIdEZAv9CvRuBXmojH+LAI4YVxDM0i8vk0ChlfrC/2BKocBQ/X1aGdcSXTmkL/e7vjodl2xbUiU5wYMseaz5iwh6pIDN1F5LuCohiEYgG4spTCwqyFds/Dzu4A3V4b+SRLzx4vCnGqnC3rBjFBMpPUodk2Hn+zgcp8DjeuF+GMzpb9MDHUU7+SGfKPWi+rG1ZrNi6v5nDjWknBJgcKpvnURt7Sjxw3A/6qtT6uXsrjgw8qSMQNAZJ6S0OadjulwE+KAmfcEo+f02gYYuPAR7JEBqHM0sdfefxZMWMHGPuQT2KRx9/70rPcjH1qnQGS2RS81D0MRzfEvB5L56HvbmJgjJDQ+zCQlKT4dsfF1p4NPxzh0892RaNmGddCngAoRFnTUKkQvzwuJWKTjLinQV4YpCpdyk2cGiL5pTMKsFMdYm6xgAcfzEt7ZJjJpAnCaRbyCXgu07hU4BD9sFubjKT2kUqpRxP9RoS6754OUcV1lMsF1clLiXCkjNOiETN8YeQC0UP6jP/O0sxJ15BXx+MxFPIUGpQJ/7hr2RfZHml56dZlhIHKX568lvRtdDx0drcAh4iBBrTUDCqVCjJJCkYBeq4n5XLJgHnwY0QhI9TPVB2P44iZGhgwSYwCPiu2xfM8ZJxMjYzrGAyPItonr1HCADV5HelUAoam4fGTAVzfVYF0xwg3bDfqg/34oYakpcGze/j2q20R9hjY9jqgVUlHEibwQsn4+OqrA4FF/vCDCgoFS+INAveYQSoSvBP/5RSGQ5UGx3egVhtI1so7MbjpIKYU+AlQ4JWYu+2EOBh4MArjDfK8E+YeRGZIheWN7TWEqPUVdnw6JTts6KkgO7NUQczV0Xdm8H1ziIS7g06jDyuVwF/+tINEKoH/679cl+IazLmllkszOv2/7b6HdMYUTY0aXt8O0On6sOKGIJqxbOxu3UU6n8MHHywin7eUpmzoKqhNNntlV+bmxeh5gr2wQAZx8blBk8Gw/jzx8JvVNp40i8hcu45YgmVZz0YwTdPhjhyEnRqKBaqPZILRgwrhkyFflPhj/zTb5Fhf1gyZP9PQioWiik+IhkEmPY7Of7Y1wHc1HcEogGtkMLc4iwfvl8TkTWAaV2ISjqwlpFOn76HedKX4jq7SDCZaVgJAdIJUY3xEOmWKX71n+8ilDbHE8BoKC/yjX5fpkDz43JkzP8ngeZ5uC/ndVNfIP475D9sTgUB7MSOEVegCHGzs4ssvc/jlL5cFb2GyLyUUvPxZkw4cH4VQxpCsb7Yx6LsSOJdKm7hza0bM8VzHb8r8f8zUL3yKT5guJAqDnH2rPcSCmxFBWOJrLtzy9MYpBf46KPBKzJ0BSmmTjC5AmD17BPckaeNJxeAH3RB5i9vgGzq4S47xWoMwQCybQ/nX/zv2n3rQmiYaiw66zzYRmp8jPVxHNuEjGQfW1urY3+tANxSULUtjfvddA1bMR7fVl6C7gUMNyUe/M8DWTlwi5/sj+o9jWFnRsLnehD1wUSha4t/e2Wmh3x8eblwENiHE6cD2sLvbk8hzyR/PJrCx0cI//9NTNPwSMr/4BTKVGVA4OfPBEqrOAOmwhuUFBpApvyUZATd5MjGBSb2gfMWN94jVnmFUolSqPkXBlPx0Zf2INE4HFuwgD9eJQdvv4NuEjspMCuViXHzmGlP7xsGL9Ccz8K3ZcrG976DZZq758+NgoN9kmhyFKaYrUnMfDHzk0ypYknfxVubZZ1KmtLW5S2AfA+2ui3IhhvnZhFw02UXU3+mrl3C5kL9ICGIbhq4jn3Cw9s1Tee737y+IoMTgR1rDJPuCD2sscER9cd7qnBqJ7wUigHR7A/GrE+p3bi6NSiUtFiJai4hf8NM51LwYyJrPJQQgajBwBRnxNIyCn878piOdUuDNUuCVmPtezQeIiLobImaFyBW1cwXUUYO0UhqyRQ21LUbbH+Wev9FpEyDDNBFYJbh6iPLlUNDXgrkFlG9dg7v+Lxht/hmzpgev10OjE0pRGfpTMfKwVInhYJ/IZCPESUFXQ4LaKwK4vR6GHcBFElevJmF4A3zxeR3ZXFJMotRAnOEIZjCE2zPFl7u104U3HAji2cBWULif20MJgGrU2hjYOvyVj5BcvQEtYCnU81CHGqcON4iJD3nMvp5r4FRcwdM51nPtnOUf7IvKNRlsEGi0vgsmf7s7QqM+wG51iGa9j2GniWBko1vdR7s2J/C8uXIOi0sFVMoWUkm6SZSGTc26MhOHbY9L9r7gyqd1whsF0q88Q2rQ1HLHpYFFVZ8YPLXsdNLE0pwlTJ3jJYJhq+OBEfOsT0DhIjrIbEQbfqHf6PfJzxcfHf9N4cRye3j
81RoO9jsSyMmsCTNmoDyTRrGcBisQstQpBRUy8k5HlcQl2BAPliHerw4kfY/n3rtTEEbIcSkh7sWeJ0f17n3n+uDrxrWey8axs9uTdUK3AisQTo8pBaYUOJ0CF2LufLcePvPwu+9s9BMBkgUNAYFEzrl/8HrTBDIFDc39AP2OSqcLX29c0bEUYC3u+qYD3wmRzrK+uYuZJQOzl2Yxmv97NBBgb/vPKBV8rCyklZ+S1eXiCbS7PuxeH6ySyWpjGkupDjVkU0DCCDHyNHQ95jirYDViqhfTIbIJFyPTh5UhYpolNeVHToD6QYDRwEbG0JBMaWgNTKSTEI2uq/nIphPwY4bK2RdwlGOndOxJ+rlpqnUDC70+YxAUY6VpM9o8qTEnrHHU/vgZ8hzdBIFHv3MgjPTFx8v7Rfs+tmd1kteoPxVo5johRnYguPqsoHdw0EPjoIV2rYFRt4MgkQO8IYLeATTGTKR1qe2OQReNXh3N/SyelcpYXCoKDclomaY9U4wjLLw4QjUGBtFFrohoLJwbQYioHUeadDRWMnfek0kbSKWUyT5muGi0aPpnm6SislewR+IsGNo4yv/4IZxCIdVSgml3/gC93U30Qmr0IbxAR3XdBPQ4rGwGl67N4fadWYGy/f7zKqq1Hu7enpW2d3b7glewspQVNxI1+/P67yks8Z0873t86uTO+SPHEDF21l5ghUTWOWAp5VTy+MyGc3YxvXxKgb8KClyIua9tePhvXw3gzocozerQuaHRTDqhzZyVetws0wUN6byOxk6ITOS/v8AmedY+uYHYvRDteohChVjpGrwRMGgHaGwNocdSSNz8O9RawCdf/hE/81gkJkWdUzDDDxpD5tyJOVs2w4n8dqncpgH9gYvvH9fEDKvHEsispgQGtT/wYY/IMBmAp8O1A8ltZ0UxCjrM8TJjJspFC8vzScwUE7AHDh7ufYzuRha56+8BmnvmHZgsiDEHMfRgCRDZUToRN1HSgql3TM2juVs48RgRjDjy1CSHI3I7FWswSWMyw6ETIngBfChikko75zU0gXuSV03TKum39bQKu7EHr9+C73rQQpWZYCZU+p9YE0TFV2XWmHmgY4RgOEJ73xWTequRQLGYRmXGQj5njFnu5AjVd9HWjyzv4kPvdNlngEyKgYDqOvF1E4GPcREiBYXodHxU6yOJl6CrhMLcizEKTIeU63/Y9ZnPcAgUNDLG0cKnUBUGIwThEE6jiy+qdeztLOLBh6tIJInP4GN7p4dYXMf1a0VcvpQ/LFITxQOcNgA+Hx6RoEcNX+iuZJfTbn3tv3EoFEKZTUFX0fZ2F8/WW5ibTUsQbNBTwEO8juOe+t1f+yOYNvjvjAIXYu5D+gMzgFXUYEaRxhcEoKFAYJhAflbDwUaIfisUM/2btLxpBtA+kNgylBaIKAfkZjT02yEcmyZ4B7qRQbj4W/SGQ3z5zSdoNriJxkFfOlN0yklXNEZBghkvCtmWhR6aZISlzRFiWgA7NJWPmKlHAZmpg37fA6Pte3aAds9HcVxRj7+T6dMky0h0Iopl0zqKyRFaNmvbRnrjWVdiCM2Io+sUUWtu48plZabn5kgNdXYmLRHjrFi38WQPcW2ImFTN4y5K3AIPI88U87k5wXjYO7X2gRNZKFS2gNSykWpjIWrNkeQoM6DPHzmS356I0ZUToFevwm3uwjQUGJAC01X52EfsbXKOajxSCdDUUEwHSOs9NKo2avUklhZzWF2yRAuPmPXk3ZPf2X7f9gVSl8+AzFqEnIaLvZojWPmFnIlCLgZaViiI0UTPoEEy/osIsZP9n/RdzVsxXLmGDI9+d0GWCxH3HbS2N/Gv3R6sdAqthovlpTzuv1+RtEoyxvNo65FAwniEft/Fo8cN6e/WddY3OFu9gJPmcp7zHAef2dZWF7t7Xck+oethcSEjKYKBr2Cfv3lYx4P7FanvrtwN5+lleu2UAn9dFLgQc6+UdOQPdLSpgZGLdCMAACAASURBVDIt+fjd+MyU5MubKWpo7gHdRohsaWKDO3MrZ7tQlJIAGNkhjBgQi9P3C8xdZlQ5A8wUtHkQ+jDiefhL/wdq/xrHzt4fMJdrw9ANJHUDvZEB0/BhmWryDJymy5UblePSNa9BG9HUrcMZl6+kxkFY01G/h3DgwInpsB1NUpYgwYRjNLhxah0JS/MwNWqNQACx5PmJTd+tlYBZnkV3wAxrmtiJSKY0tnw+IVYJRvPXduvQB01kYxRCqB3RIaxhOGKOvoZU4vkHTaZDc7RPV4QbYuSEGA4D0cwJ9uM5Q0FxIzp4MhbK8MlI2t0QehhIkJtGyYrHGRYRDfs+dGhGTEqwphJAwvTQ6nexuRUinTIwW4pLYNlJq4HPh1qtwyDGmKoGyLKndENQb1V1AXTs1xyJwOf1rEOQz7FqmhKInqfCST29nvPSl/xHwSBnjQDDXg3ttoZYqOPhp110WwN8+PMlFCghcjGfcnA+tBIxav4vn+1Jtsf99+ckvoPBnDR/XFrOSbomGeiPcdCFsPa0DQarLi1lJVvESpjI51U1OD6b926X8e3DOv74p10Ui0ksL2VRKifHGQ1TTf7HeE7TPn5aFLgQcy9kdMQ9DcN+iEz2/PntL5KI+3osDlgZotWFcIcAq829CQ2Jll67T79viMKsYmDsh5teIkntcDw6Mf95CPMzCH75d2j/fh/9/meYm6FyaKDW1eF4GmayHgxD+V9Zpn3gmmgP6Yj3YMWYdkQNV224MVOhbCXjPlKGJ2ZYU1C3VCBh5L+Wz/E4aCYlM7IdFo2JHY3vRSKe8G9uz4YRIFnOovo0I5H/i8slBNTOCSrjQ6KoiTGeL6Zx0DGQpo9dUvZEERY0OJai7bQJu3t0kGYiLpgx4HFTgguZrua7I8Q1F5k4q6XxesU4+Y1Cg+DY0NE92dhRsy/9xtv4J7xH01BIh2j0bTzb7CFp5ZFKnq518pkwccJDKBq8xEboGjK0kORimC3HJV6CKHbb+0OJ0pfhcsKRg378jc+KWr98jr+/yBKj58nb5dpx1iDX3YvXvnTyYC15DUlaGxDAHnWw9q0jcR+/+fWq+KZPhWcNNTSaQ1SrfTx+0pQUzc+/2Jc1ViwkpMQqnz2rsdHvT+EyOia+Rqde+ZNCI59Huz0UNxeRE+n+oBAsiIqSbhgim03gg/crOKgPxBXxxZdV3L07K4GWFLoIKKW0+aPxvvLgpg1MKfATpsCFmHsioaFkGdgYqhraF9yjnyMbN45CRcf29wG6zVDM5Sp46bnLXv0frPjVJIY7+zsKpmL/x21emj5Cdj4D7/3foPqFi97+U0GHI747tfODjglCj1IDdzwDPkwk0knEEi4svYuEEWAgBWECbO4NUWuNxDSv0Z9BiNdAx8ilaZuWgBAO86snuB4D/5hqVxsUoFllYZOsnn6ugwA2mTxa+fv452/+jHvdhpSVZZR5acYSTYkMzshUUAcQtzcFbY3+EvHC0vdrNxH6AxnzUe+BIMiZWQ1W6CLu+0iQgVnKf0wWPKn8CW8818BffjGfGRlEPhmg3u/joJHE1ZWUil
4/5nb6323Xl7K/XgBs7g4RNzUkEvT3klnStB0Kk48vsBqehlrDwdbeEKVCDEmLOAVKsCCjplum3/fRtQFHNxBzVeDgZNe83na5PhRiHOGLSQuWiY294OqYvO+k72qdamDMuGi4oYPtp/to3qmIRnsSc6eGvC5ASR3EEwbevzuLdCqOesMWZERqyN8+rOHx4wZWVnJIWuZhcB6XHJ8lgYSOe09OGutJ5/nMIlmJ1pzV1Ty+/raGz7+s4vbNslgOJv3qnBODPi+t5ATw59tva/jmmwOUS0mx1NACtTCXkfiDH8vicNLcpuenFHgXKHAh5s7dLZfUYI6U1H3od3+FGVGLSWY0mDENzpCc9nWIDM8PiPncnkvNPYCVVsh4L7MOEMqWG1Hx7vuIlyqoffzfYe59ilLMhW4qU61o6mYMpmajaA0xYAW2eBID14Ku2WICfvh9A8NuB66vo1KimUJp60wHI0TtoOegYwYYuUC6yOplR2P3PKKwpWAm0kpfPOKuRxed8o2bJH231up17O2kUNtZp3Eb4aiP94pV/M0vsmg1bWwNVxHe+w0aO7+HV/8UpfQIelzlxusaI8KViePoyVDMILgPUd+AOLMGxmOLPk8Z1tl+GoP9sFkJ9pJ/E2/++UMw800dVlz/QcDb5JVkKDTD0yR/aY5pVUC370plu9GINQM0QZ6jBYLzZKEZItExUp613Rk9X8iaco4Mp9bwxE/vegprX9wZkx2Ov5MhkpHTugO6anwNCTMUyw/7OecjPeyB7VJIGIxsdDo2gsXs4W+TXyLBingLtu3BSsaQSsakvvzqak4upbCn3ZkR8zfhaqlRM8iP5Y7Zz1wlhdnZlHy/KAON8vPZnlRz1FhvwJMSuEx5O6gOxGWQTnNveZ4qtGDxj/EA1Nr39vsSpGkgwNrTFjrdEe7dmZF3h2OfHlMK/DVT4ELMnQxxt+XDYdUzkz7j10TCsaWWG9Hzr/XraZ/tSmU0F8iVla/3LP2IBsHI6sUFxP/+v6L1aQ77X/0B+WSITC6OxZLKYX+6FsDUXSTDAfQEa47nJECw3bYxaLUQD7rQzQxa3RJGLoOzNGH2odND6LQwhA/H1ZFMJ0WgoGoTBB4GIx1aJo94lpvsWUb8Q3pRSKFmllldgTe/KO13G3188eQPuLHXEDOomY6jdOMqwssVNL9Ywmj7nzCDpvhllSFcsXVCp3IUDILj/svvk38/7P2HZzgPjikcuywO1UGBsGWUOHdnVpqxZbMPEjGYGIEV5ZnnTIFHjYlxCrRu6LCySTGhk9GfdJB89DeTFvTR0y3C0r1kGgye48HUxYg5cM3Qj88CM92eJ6h29ZbCGmBbzH9njn2LmA2aJ8LBcb0fCUTKQtSydcHEP2mcZz3PvmhdYLZBr6ui+imMvrhOhJmGIVZXcpIj/+RJC2t6iI/CEFevFkUjZ7YA88g/uF8RWGRmNnR7jlg5mHf/7Xd1NJs2rhGb/pg+Thsz6ch7+gMP9XpfglIjJD4i6vGZWJaBO7fLgqR3kvWBffBZ8fmtLOdUKqbO2gw9PPq+AWJGrC7ncG7r1mmDn/42pcBPkAIXYu7Nto92GCCRfb1cmFoU/yRCmGXX3wBWxf/P3ns1yZGlV4LHtUd46NSJBBKqULq7RBebZDdnaDtjy6e1tX3Y/7Z/YvdhZh9IztjuLLvZJFuV6KqCFonUMrSHa187343IjEykBFBdqGK6GZAh3K/43OOe+6nziZ9z6OOX+3XcSnzcjSQQIUJhooH+7f8Mf6uIQvhbzExluH6tBEsijsvYWIlg6zECP4ZhathqMYCOkd0hTPowNQ2x30XeD5RZXiuKpmnrGWGLSp0yVw6j0Um32otNWFPTcBjZ/So7Ka7waQLTYqqXhupMFf3gbXzz/J9R0vrILVtIUoxaDdmHv0DbbqD76FfQ+s8F7AiaueUhNwviHhDi2pRAf7F64CTVMQpVsWmLNi46sroRml2ErpsSpZ+xZKBZRGq4iPIYWc5cPhaT0ZFkvFLllsfMDNQdTE+VxGzOxR+G2nCM7rGAIIP/slxS81iKl4ArcxpmABaZ689RjW0ORGQpzeeaVIOrV3NJDZTnVIdkPJAdr7WtHppxED/uMeL3AnQSC3Heh++4lg4+Y5t0N9C8zdcntUq5N1uhzO/jj6YFTEmOQyDlPOWQ/H5b2mk06NnPVf0DaNjY7A0j6nXcud0Qc/j+dQfDeeEVgZj/CLxPnzaFaKdWL6BRsCSAkfKmC4AbZZIHMTvh6ObkaKPsl24UHnyW5+bKovUvr3QxO1Pa98Efve7y/aUE/r1I4KXAvdXLEZO16yWCt08ULM2lA7G5SgT7eRaNE9s64Qsuqmw3JeGOLIFnLcUHDfFaBuPtrYbYXncw/fP/CG3bxcqzX2GyFmBioogrswXstcpYWW4jS1ow057sUHStwOVX/mUwYSCEnQ9kMDGtCTpN3xJrz6VqrFPWu8/QjYsw6lMK9MeA5+DEC76SjYoK7irOX8WTZ28hWX4M7c4ULMdGsP4M+tPfopw5WLbeEVa8hpcAaUGB6IQFWxdoR5To6IUE2wTDbL5TB8NFmwt9uV6CUSVD35j/gcBklcREa+mTiMII0E2YtgXCbpaVJQ9ayy0ECUl2GKvAcTAHQEOnG+M52eegyeIuoKJCG0TDoymeG0Zq6BVPZQCIaZj98rk45aGj2FmIhc9BwVEuFQFS3j9uegiOY7fuRCHI+RD/e8FW7Z3S7YnNjL5gl8S4zLBQKrkC8Ikya4xOGftLSlsVeOYPYtBPTdY7HuOa8v7r4eaa4Ml50w9Pbfvx4z2hhJ2ZKUnlxLEOXng5AnaWmn261BIQpmbNtEJ+x7mzbfbBZ4NAfxawH+2E53Ojx830WW62o9devr+UwI9VAi8F7vQHa/wRvkbNmj9wVpkjxpmMlB9pEq9b8mr/cLJ6c0J/BPb2do71xxmqkzlmbriI5/4S67tt3H36R3xSsASUrl8tY2urh7CzC9foI9UdpJoj6WxqCVLalQICrmondChYoYAr0T2QCOfCgz65aflGKrOZBrKrPwVqt2BXypKLnne2MR/eQ61A9sEpBFYBN6YX0PFzLD1rouQEkoomgJdlokX3Ah1F+3wPBBfigj6Ajb4qLCCjUdpjatmYbbioeQRb0vENb5ho68xQcASEW32gPdAxWU7F31/IAjS3ErR2FWiQs13AQ1pQ+fMEtjDRkesGZicdAYSLumb5XMrGcARK0KT6X06O/nOwB/J2M86Cvnn6yl/HQT9+qVYGtWHy4p/026FFY3qaPvMcq2tdCZxbetaW9LO5udIhi8X4uHg+26SmTAa8TifAN3dZac4EqW5Pyq3nXPm7fvSkiaVnLdx+q4FrCyxXrDYTYmHZ7+jlZUE/fqcbgW6E2VkPtMocbnu/k8sXlxL4dyOBw2rTOadNE6Weq2C6U7DpnK0dnEaLM9ujWf5NOkiy090DVh5kKE9omLulS/EW07VQeOvnWB0sYHsnkCj4qQkXN27UoTmeRM6DZUt1anoKYDTZGfG9+if5P1ztxg6ey+AmLlrM644th
p+bAkZjp72el1kK07FRnJ6EYdvi6yb4Oo6F6ekCPlns4tM7CW5c8zAzYaFoJ2DIGn3S/MdAu5KTIYx0dAdMn1JpYUem9MJYBSQFNJSmdvCeGzu2r/zu1KzlO2pmeo6ik6BaSFF2M/RDXUh0mGrGHPyGF6FmByibAxT1Hpy8K3EOdtqFGXcQdFpotnzJWZd0uZfHk0PzieNUAP+sOfMidhmnihnQ0i+6tTjUrXqTZ4gyC7NXGqKJj0zVx5wpH1EzJpB/9rM5TE95WF7tCpsiN0JnjZ/XMij1nbcnUHAtiazv9SKxkhzXH90EGxt9yV+nn/76tZrcW46R9/R1HBwzfz0bGz2JG2H+++tq+3WM77KNSwl8XxJ4Kc29XtHhMF+Wmjb97q/pOGtxeU3dXKgZArvfAZYfJChVNczfYnqUigfQswzFqQba3iy+uv8UrXYI29YkGt9wigjSGHlugHzqae5C013EGel6bUhdN5qCNRu5YSHTPFiIEekagliXwK0kjrCymWNQn0fJfZ0+kCMiGAIpV3ddM5BqLnoDZb61XQuWLMQqAI4R37xPowWU2rvnZGC0eGdgqMh5nfEJDLZUvuCLehLYnQD6MQAgnwMouyn8SJm3i7babHCZH1W4k9ec5nDxp2/dT2wJKLtxpSBm+ouO64jU9t+OAGb/g1NecPwRUx9HsRXHzPGUyw99xV8etXaz4GFuvibpfFF4tvWEmyfytre6IX7y4ZTQ1h7yux/q5fAbasTMKf/g/SmQMe7zLzfxzp2GEMuMNHxeQZnQTL6+0ReLFgP52Ac3CK/3UKZ9BuVxY8nqd5da++uV8GVrP0wJvBS4FwsaGHITDdTi+TqmzoWKQUpcnGkCH4HH62j7hTbOuRqTm2XQA57fS1HwNFx5S5exjQL9uJhZVgLdAja2E/R3t2HphG1FGWqYNvqJi/7OqtLc3RKy3rKw89BETwchmecK9Uk4UmnMlPaDbg+ff7WLNAzg51U0/u6qaNdIwxem8lo/EH84oNdnsLU6h4mdp5iaNMDoeDoTyGzH9L8sTwQcR33zXtW8DINYx3rHRq1kSPR26kcouRkIvjLfoebK9f2kJZ6fn+feszoa/9F6cNL50sfwv87AQrFaw+LVsmR4EOz/3AefcXZL/gbbzESGrzYKMgMCjbmylHbNhPDlHLMadmrqusiOLgxxNZzjUp5C8PQ8S0hl7t7fwf0He/jZJ3Nwx4CVmSAsDNTphWLKpxWKEfGv++BvkIWbymVbiHkYNT8/Vwb5IUYHn4/XlZ8/avPy76UE3nQJvBS4E5dmSjqedRnUhAN++Zed7TDoPuyrIC+6l09asF+2C7mOq6tOsCJ/PCPGudgcLALjbVNjH3QVsJO5buEO+d7VBmT/PM1A0t2B1b6HClYk6EsCtHQTg8RE2OsijveQDVoC4lxC0972cHJDcHerMLIKHAJmrsCb7on+TogoMWHfeB/FaaYeZfs55Pv9n+MFTfwjVZsL4VkHo/HdRgOD27/Avbs+NH0Hk5OuMJUxValYdBGFAUyhoqVAlQT5qu7FSH0Ts/M1oWzd3g3QafsIfV9IZqjJk2kvjGLoLCOoLt8fEt9SfufB3Tglu56qkX7a+WyTmnJuupifK6LoqGp3+53+OV/wljMVL9Xg2sO8z7NvyYkj5NzI9me7jkS+n1crJuscc8UrZQubWwOxZrBm/WkBhUcHQYCnlnzzRh1/+tMWvvx6C9Wyg6JniWZPVwWD6Ji7TlP5fpDe0YZew3vOm4De7cXgZoNV5IpFS9HsRikKRRNTk0UJNjzPb+A1DOmyiUsJfO8SeDlwB3B10sSX7RhRmMMqXqyO+9FZc5GiNpxEOSyHBUgUPejR817tvcTTIuq2EK5voOeUUapMS133nCaDsYP9Bz6wdDcVv/fVt3UJ8iNl6cFBE7CJYGsZ2vY9lMntpjlqq6CZiPISAt8HgrYUKKGZnjzqulveDyLL40j54+k03t9m0PeZw8ubMOxp2Nfehlko4OgYD8Zx+iuhh005Vl1ocg+1QzM81V/6uBUnrDSmZSkKCzfR3v0p9nb/H0zUJT9RaE+LJQdN34An5K0HfRO0HBPwrAR5nmJ6soCZKRvbuy7uPtSx2uxjogLxk4eDUFwQKkOAGE+/ulLBKQqaV/lvxFRD/Du0LxHiExWUZg014BMBXsvRj3SUJlzJVWe09/d9nDYC7sV4HJrvCQPm1pTPFLniVUrhaS0fNEL/ercbCrBfuVKS61/GksFAOpZhvXWrjt1dH4zAb7ZII6sJ612j5uD9d6eEEvesWICD0V38Fa03tAywMl7Zs1VufpDA78diCdza6YPkPTdu1Paj8i/ey+UVlxL4YUngpcF9uq6jlOvoERSGC9LLTp0LGrneibElkrB9F4dhINjeQn3n93jv1hY6oYWVu+/Duf0BHJcpNGphpEsgHADP76bCdHXtXV0KzBwGdloAmFaVIt5cAro7ZDPZNz3TVuwaCYzpCSDnggLEsOH7KfSiAlopRZemyAKVLndgQVDjICkJyfatxiQ0Il0Wn1sqzGfm4LnIdp4uIVl/Cr1SR/HGbdgsFM+5GpZoU36zL2ZN13OGxDEEFmVEt7wK8paLLIskAJCm1nLJxu6WhZj58mNFg0awQhY7+j9ZRMaxIAQvPb+Ez7/w0W4NYOoJjLgrMhlNKKXNg7EHyMDX9CP3yX5I5ZbZE2THGw/9JE+BqQLs+MyM+h61N/rLx5JZYZlhS7AZCZfIWf59HpwTR8BAxPGDY+Uno/FJiVuSGB0+bfwSuYDPFoHtogdZEXmfZ6Y9Id6J49M6Or316akiZqaKIKtfGCbiKiELoFc0JeXtuwT20cgkFsDQQLY9ziuJM8mhty1DMgNYGrfRKKLRcPdlPLr28u+lBH6MEnhpcC8XNUzZGvb6ObLqMNL2FF/qacIjoPbbSnsv1V5/rqpmmAi6PrzNL/AfbuzhxvU6mjt9/P2/3cfq+jSc21cEPDkO8qYsfZvIwnntfUNy+V8AdpKwOC7623uI15/C1CJV1GV/kjkc9OGaRCT633P0U6A36CPtbAqw0deulaehFWvIDWZx03rARZo6GLVYDTrBvewJSJ8/7ZCFZhLZC8QhkD/5A97Cl/D9Ip7s/TUqH/8HGHqIpL2BtLMH/8E3MOvTsH/+S+gcB9UgAZUMadCBkYcwDHLmc0+QC5vbZqmAbi9E3RDd8WDWTJfKdDH5MilACGIAzLDeeklDc3UXpsWNgpILhcx0t8RpwPNcicLPdU3Kyzb7rKjH9hRN6wQL9AzdN+yQoiV9a0bCk2Hhn6PwRAtIkOhw3IKUb30TTLLkBeA8rLH68rzrlKQf6IgxTP8LWXEwhWsxQl0Fje0LeuwF5yS+9nPiuzpN/V5ZAY+lXkkv+yqHRNFLWVree5U3z00HAZf//lyHPC+kDZZgPl2VTmbNBpPWDaZGqo3rn2s8l/1cSuD7lMBL/6oNS8M7Cxb+8IcYsZFhckqpVvxhUSk5j1mRE+f5NMkPurmYvunfPu+15xEctd4w1BA8uYef1jcxP19BnFmoNMoo
DvwC60WnVc+OHLGN57CE8dGBNfpLVr3NpRBe5tj0NeylfDbXrQmQxFZuKH+7KIJGWFMNpv4Nr0OLzeQVhkD3tEkxipSu2IDTMcQ4sJUtppTQlMInV7Pnbt7kfiCKWojn1a2ZI5EV+6sgBnfh62Vm97dHehoKjGA5qSheiGHo2qfNGNL0vvtIFrJkKxDOKRQDQAy/C7fMNZe6CbaDqGJJwRw7FoE5bPWb1H7YQRsiXWnoF0QTsen4sshtXRiY97d+u9yVBnQg8MOD6d+Tpe47In9vGVdSlzA0MrGZtO346OQCZaDppl2jnYNwJX/sYFgFCulhykkpvnlO+0h31evW1U5+pzd/K7chhkmFsFN27weQNOnuyTFLF8ctjyeNRCLB56pIuQnRyD9crmAoyLO2ozbt4sSqTEwGAclWoTjYYnznWdBed6ZTw+/l4fAQ/z4zcwUy7iudExmPk7uHF1AkcP74at6+gb2YU+v4q/+INXUfNCGN5/GP1jUXzzhxfRc+gADN1CuVzES9/4BvYePY09wxnxg6F/jswbkv5bpZG24z146skP4Nr33pRMo7HMIPbvGUDhznW8dfMipq5eR61OMjjA7t+Ngdhr+N5XvwjL0PHBZw7gWHIU33rh2/ASI0DDge5W8OrL3xW63OOHhjfF3bHR3RRwF7/0DuZw9iB7mFCobnTpzvzGAaQXN1Wu/RO3MHlnDKmxYWha65G8t+SjDzOZTCiBao1e6moTfKAjVNiUTGA9KU1irTu4Qemr6QA3ri/AdCswQyp+/K5RI1mMZiJwy+LIpmLilwziKoQhFIWdGUQiGUVUL4s9vwPuHbc3lst9T2P8OXlzaAK498ZzDMazhyJoCuYp4CNwE+CVql4CxNu2eB5VpgeGwvmOi8kZD/RkZ98DPSS26g5lbKcFfMYIVDyHmeFCRoBijXUA9WaASDu0rHP+ep+d8U0mTMzMNVGre0jEN89Hzut1hgF2qxDWq+whH++YA0hnfO16HqmUjVMn+pFOWTJ4nUUJfRTeTxI7nxuOTbnSxOJCHRN3ykLVe/ZMv2gzqhWmKqbW5f2nxXjIj+i7ojorEsfQyHH8yIc/iNvf/BP88bdexoc+9SPotyCJaFw9jA9/9mcxPvV7eP3cZfzqL3wcP7/nIF5/8wJuXr6Mi3fGhX/irasF/NzPfQoD2Qgmb1zE21cm0DOyH0cP7UYkpMMwLSSjEZmPW00H8d4hPPnEMfzw4pu4UJsCUj2IxxQV7yvffhUlP4df/fVPY/q1b+BrL1zAE//tZ/H5X96Na2+/gVcu3cFbr70igtmlN28gnP0lHMwt2+vvZ+CXJHe5mDOthAvRUWyt0KfNVUG1GbfORLzRVSIBr4oVpqc3ySye2FdG5epraIRDiOR6yDCzUVE79ptu6Sh7Bqp1BVDdFVHSIp8582YT7Dp95hhUqh5azVZbGl8fagmXauJu34BOBb4HhKII9wwjlbQQQUnS0HaAnac5WhhuEJKFj5DEMFSOt5Hha+tX2a5BSfp6KIQwKrD8UlshT690wrGS39W/VNrHJL0q6WeVWiFAQEZDavMlHC6AZ2WQiscRDyv18VJXKJm3GxQl41y7BDbRIXEN6Wrv2V5VGqXsZMzAzBxQqrgg0BOrO5qATp1rfmpkO2Ocuurfmuc8goPsO7t/42YBVDXv3pXC7l1p0QaRMrfzXD2Cpj3SKjvAPnmnLKlyeZPTqTDGRlOwQjp8N0A0YqqEQXyoHm/v8xEwMXTwFEYvn8MrL34HxbkaDhw+DK+UR8nScfvKG/j+pUWcPLobrmkhk4oJw2W5VMBM3sWBfcO4/dIbqHthGJWamOk4oLpmwDSVP053GB8leqflQBdulBqmpmeBeBq5tIGnM0kMpU3kFxYRCkcRCdUwNz2HUqGGWKIHoVAIQX0aN+6UsO/AbpTHL2M+34SuRdDahsgPBe4rHgc6G5HYlM5VpOTklLO5t0aAOghQLDri0HWX6MiwpyWiEFWsYWpIxEIyuXdUo0r9YeDgvgTqlQl8L5+E1/Mh5UH4kGc5VqfrHowQQXrtWPcVw8cREwlVA7PJufUKDN3vsmKvPJtAzftIlfSKjeJZKA4rOygpTQns3WQ0vCstLYxSI4R6uaIc9TTycLfgOy0EtN3fdQNW1CDlaaYN07KE5W7ZqKDgnGcrvHXl4dU1ZcfXA5UkZkVp7DN7qflCNRsJ+XeDrSps1WW8RodmGcoMtIn7y1MIzulkCPP5luR0p/QeCil/gxUVrPrCPjaaDKOjlMfHf9W4rzr/YXzl80Lv/dvjRdwaL4nH/OhIUiYWxcb2MFrxzqyDjo90wiKJUCJh4cihXgXqASQUUlr96G/hO3Pw3mGt4utPDYzSUK0xGWyxvZ2yOs6xnct7dp3Apz7SxJtXbiHevw8fPXUafmUa+VZC7Np786/g2pUbOPDUh/DkmeNidmzBwK79R3HqYC80I4ILN4s4+sQz6MsqyZv2cP6t3nTTxlM/+hHo/WOww3HsObgPc+euohkbxKc/9AQyVgu3J+Zx5LmPINF/AecuXUIktRe/8KlnkAkDCwsORvcdwv59u9BveXjx9XHsOngCB/vWSb6zugEbfF8J7rrysrabc9g71IBhMh5xg6tX/cTBrlddvPbGjIToMPa0G7TERs086W2Ap329VGpioC+GI0d6lsKTOEk3ai3cnq1jctqBaziw2sQlW2nPqubd39eA6zQHvZkGknHFJraZgsiEVii2ELRINMNBXPth5lHPp/coE8+w5DZVFyX23iGkEgbCQVli4ZfLCOBrJqpOGNX5afi1hSWVgkNP8E51RI0NNo4lgZUqao3x9rJEWHnN2ref56w8r7saXkPovOvauw60r9I0Aev1S+wuXe1zIdibsWSCvzPbhLnYwshgGOmEKYxvstToqo9DwfJbTiD8CSRK6ai57y794R1hu0IhA0xCc+lKHrt3JSW+/VE79j28Edi4Jmpkbt8pSyrqPbszYvLppFze+MrHv77TRsB1XTTqDTQaNCuJMfiBmkgfp3qtLgRjHcGQBWq6iQMnn8HogVPQLVts3G5yWHJyMPPij3780+KnEYlF285rAQZG92GgbYp+8rkfw+lnPZHU79VA5gV5+vmPLZ225/BZjOw5Co/myZCJwPewe08MdtjG4TMfxK5DNaH2tttmyMzQHmSHlS5g38kPYvcxD8Y25VlfBvf2fF2fX0C8cBEHn7agG8ww1TVDLnVh7R0WwUmJIE9vX2Z7okpxaaPqQrynVGfo67WYb+DGjQImJsrYu5dejQHmZ8t4/WIdl1t7kPeeQSmfxOi8j3QfIALu5pu0VPV971Byt0Kow0KtdXcM+lrlUlXfZPaxakPFv68fuSYgSbs+gUaBcoDAjiPSM4R0XEcYZYHJblU8x9nVLNDO4zfKSp3Ng7IRsbmzdKB9fL2P7RpMVQ7/NXQ1TquVEeu14H6Oc2HCd2Co30YuE8L0Qgu379QRDEbE94G9b/sGSvEECdrnS3UNZ0/2IhYLiXaJ4PrQF4ztDnNRpRsayFvPXOzDg3GVZ35V+Nf9j
M974Rq5NwgwPVNFNBxCIs579v41Ubyb7ynNlJYdw/SshXKVs/xm56f1e815ttEw4Tq2qLhXnxmOMLJJbWaoi69FMxCPd0vGy6Gx6mwKG8vQ2Cljs58hOyI6b55Pmm873AEADZFobEUx7EP3tl3AzjLv6oFTc2B6LUSizJ/Lijc/+cuZGnmyDcRjIUn2sJpdq7sj7BjDeZyWj4uXFlGrt1Cr+biykMBi9AzCBw5hMByDd7GFm+cd7DmhI92nS4KTLTSru8ot75M9zrAsVI2EkKdQwiYD3L2lPqqIOSIbjR/t4kxl6UkIHFOnanoU4QyB3URYbOy8vvsB4DU66q6NZnUR23L3jAAAIABJREFUCJhxjr93n7O5blKdxYUFVdQdL/l1rxTHPx1GsEGa2Tbb3Krndd0iH/QHpXkAImEDg702bjsBJqYbsEOKd1xFe6haODqFkgPfsEG2s2q1JRI8F6JccHav/B+0Xfe6nnVSDc9sdFev5yWn++BAHPv2ZUSr1a3tuldZ7+Xf6YdRLDYE0Af6w0Jo5UmehPdyr9+bfWNegEOHjqNS2S1BPNs1R3COpe06mVxO6vreHMGt92oZ3IkhZFrNZuDOZdBsVBCJdkndWyibky4l+M7fepfSrcoINOT6oiDjFFnY5hZaWLD3InbotNhuEbSw61CACU3HxGWVMz2V0+DT/LsRbq5X6X0cp8raTPZhNh9CuVRDPJXclEbjHlFoAsf0bqBGnjnfKWoasbQwblFiV4uDlaDNby2EUa3W4VUXlK39AYMmVD0Pria7j6F94Ev4DNAEQia53cMRVOttEiECdvv5kBEMAtQaAaLJCGZnqlhcaAB6gFg0hF2jKWF7o7f1Tj5TCtQ1YTWcmCxjZqYiwLVvT1oyovGBeAzs3Y8E5TuaCn0Zs84y9yG99t0Neby/DSMQi8XAv8fbwxmBZXBvqye90jyiRkVsoA/jJeICIBYxceJ4rzAIRaMlzN5pwG01ERimpGDVTWDsiI47VwNMXPIQ+DoyfQx9eUgAH3iwMhnMzqQxM30b6Uwc/gahVFyV8u9eammCDr3SCe7kc4cZhhVLIWI01Pe1pHFNRws23EZeEd5s1xJYNAydO86Wde93P4yccNlggcyuH3i+OsYwOP51Ni4e1gNNle6mU1fniq1/UvIWzvlE+5Huap7aDcQWbyVsHD6cQ8vxUC43BWAvVBxx1EqlLHHeWq+tW2+VuoKgzj/aimfnGsI8x7wN/f0xjAwlEYupbGePhdK7R1i0Sp1VGp9J3swtPC58PTj2oqUSd5aVCzj1riot23bf97t78/4+wsUzEx5t9zhTC2aT0erxtmIEukbEQHlyHqnZH+IDBxxE4/TWXXHujn3hu8tQMtplspkQUjenMFspIoj2CfwR9ygFD+6jR7WPySvqBSXAi9S7hZf9vjrBHNuWiVruIM5NFxCOFDAymoJuqNCIbrUuJxJK4DMLNVSKlXZMeBfSrGqAF+hwnaZI7oYdlbh5Zmpfdwt8WFoTkWQKNacOv1aQ8LQOsK573To/0PpFuG5pUYmRXzqNanhqBORN5AAbkrSGyxEHHVsWz2HN6gYwA6wXmKg0qOrW25KzOmO1R+tSPUyZ6+iILk3gS79seYdNlWQ4a1zJ2xINa5idL6FaTWFsLIXB/hiGBuO4dGURb16YFS/1XG9M4qW3Q01P4BD1u+Nhbq4h2eeKpQb6+mM4MpwUbQGb+n73iF/jdskhPjOFYgPJlIXRkcSW33Vez/u4sFBDsdSUpDM9PVFZBPI431U+dlxomcbmQzHXa+/j4+uPAJ2Gr127gvGblyQZEh15H3TjrNpyHdjhBM4++cHHWoFVA6rAXdfRyBcQd67iRw5XsWdXCkwDzsHbimDIl0XAbVUlm/3KyTkeD6EvUcFssyovM0PmeZwgLgC/X4duarhz2RNhNztIqrOHIMEHHiIDQ5jRnsHXLr2O0/lZnDqeRjRiiBxLDQTJ/pmy9tadGi6+NQW3vAjLUOC2/hhowkvPlZRuWgjpXFGpOPC1riEQW6gjZptohuPw6kWV9W399cNaxbSPKVIDenQyVp6cc8JGBx8ebPHIDwW1tjwewPSbshBwQH8M2v5NeDBg0u7PUeC9cluYnXHVcxME4vjX09+DXK+6ZnVjqIa2Wr44S/FZU8uE1Wc9+HdO4rGoiXizhavXFpHNRsQ3hKlDjx7J4dLleZw/P4cDBzwMDyagGQxRvD+zFPtBsGB0yPRMBZOTFUlVytzrT+wbEFBXwLNSinzwXr73SmBynKmpirDScZ/ZxTa7MQ6e43/+rXnRRE5PV5FO13BgfxqxmIViqSVaFDr1cpFHXgE+gduxsNtsG98v57mOj5mpcdQXf4hEnLkzOk5m9z8CxBqvWsPMXAa1o6cfg/uqoRRwJ7mIXb6NfcNlialmIH3YVlKpCG6rLpKva4KJLqpHbwse9quLdp0ADTcEPRZWGriu2Z4Az4lzYBcXETqmrvsC8LmRhyDB05tdA2JDw6iFE3jx8jksVG7hwIgrTlAOteohHeMTNZx//Sb80ixs0xWbYVcXlrqrhk8BPx3pOKnoVlgeepLFGKgvMcEtXdTeUbEGZI25P/DpLk/a5rsSbmcFFfmJavcW4nBgy/GOZK6gd/mbB6t9DkltvKW+CtYzZSxpc0PkSB/Bwb0Jae/q54nfiaGKgGfnFmm8d52Ni8SOsx3VviFTE7X8rXBRmOEWFxvYvzeDSJTMd1sDYEXgRA1BDbduFVGru8hmIuIsJ+lKuQwSB74Hv3ed/rxXP+m82pONYCIawrVrBSQTNpjYhwvpjTbeA078lMjHJzinWThzul+46Mn899rrsxgcjGNuvoaAhEgJCzduFpGIW+hjfLEQLm1cx0b1P/5trRHgPYHcv1iUDskPLrmLIOmbaHjUunS94O3qC4VFLNZc7BnqQ9Cq4NwPX0HeieH0U6eRiS4rrd1GCedeex2LfgxPPnkGSZTx+hsXEMSHcfroLtSKRZTqLfQP5O72QF/V1XpxDudefwPN2ACeOHUccdPF7Yuv481bC9hz5BSO7BpoC0vLF+ZnJpD3whgdSGPq8nncWghw8uwppCwXM+MLiPZmkYhtna1OekiP6aRVw8LsIvLzQMgy5AFnHGG3uldNzArkVkvoSrmroV53RSJifmrlqLXciY32OMGGTKBQaaFQM+Flbcl+5rcoFS5vxEFfB3KjurzAM7eUBN+3iyrkbcG75cpW7wkSuYhlk6gdfwavXurF7PnziHmz0GwfhZKHqakqaqUKogZZ2W2JH19dDFURfBZ1ekwjJJ7qWigM0w7zIKpaXOWDD2pC+aqmGfXwEgSZaKZa1+DWSEXbdlG/q5JNHGCRXDHJqonn8y7ypeu8KATyzj5/794P2ix2MuxtYFe/Cz++xPUFEupFW7jQ067RJI4Df5f88Ts0n7JVnMTniy7yFeDIrrgiQmmbAggWfJ7prc7ojavX8nj1jWlxtBsYiMm9Us/+Gh1oa7eofudIlMst3LpdAhcItOEfGWMKWZZBB9OtLRTWru39c1Rp6zQcOtAj
ZpNzb84KSFuWcRfAc3LnPeQTK5niqg4mJkqiMTl1ok8kdybi4f19441ZvPr6DHaNJXH8eA7hcAhvX5zHhYvzqNRS2C3sd9TcKKfg98+I72xPeY84n8i7vmIuub96pSyWswrYGeFUnLqM//if/wum7YP4n//up/H1P/0z3KkHyGXS+NbXX8LHPv4hJGwDbr2Ar3/5S7hctbC7J4YffK+BysI4rl+7jVorDtf/BLTCJMp6DwYHchs21K/O44Wv/zVuVQyEa1cxMTWH02MxvPi1H6BnVwZ/88UvI//cJ/ChM6NL5bTmr+P//K3/FfXDn8bPfWAPvv+Nb2O+7uL2TB5PHs7g6ngVZ5/JIrF0xeZ3BNw5+dWqpL3zcPRIVrjSCdKyQu6ezzl9eQFcGsHX2Kh6oVo9m2UOc5Vhi6dx7JcmxzaZCF9EifHTmAbQw/xcHZVaIJOi5bSgTV1ELRxBJB5VbGtLBbQBXAN6RvigGLhzVbWVNnlu2yDQrtG79iE6h7kOYnEDxvEjuHM5iuBSBTH3Ggy9inKpDjuZgR2it3Yg0utq3ztKJCRToYqx4WrQrDjMTBR6JCWqcSNowUEUvhYS4gWpmWDJXPKaKfeqNj8Fv8kY91U3aP2Wr/ELr22Du7xsWymLL2qo6xXtvrazrz5567r/Vjek69au/mlbvtO8t5h3UGmaOHg4J/b21U5+lKbZjlwugmTSws1bBYk9pw1/9xijI1YCM59fAjqlESYtyRfqErY1N1sTjePBAxn09cUEcPgebWWhuy2dfo8UwvuSSISEbvbSpXmxkYfDpnjPd7rId4BzCOesubkaSFVLSmhqH48dzalwW9rLAOGkP3q0F/0DMfT2RNqZ5Hwc2J8Rn5/r1wtCrCULgYQtrHi8d1xodDY1nyn1PeumpoC/368Zp1Pu48/tGQHXaeDm5YuYmZlGLT2MWv4WXnztIp77mb+Njwx5+N3f+VO8duQEPrwvg8r8OL7z0lV88G//PXxybxO/8/tfwndfv4F9u3Kol2fw6g9ewUh/Dmc/cuSeMUl+YGJs/wnsicVx/i//BFeuX4JdMrAYPoBf/PEP4gv//t/h/IXz+MCZURUH77fww5e/jVdePY99ez6K+fk5TC9Wods+rr31KjzvKE6dPoWB1Naldo5kWy0PmHYEp4+FMTgQFrUh7Vtrb+0EIat+5DTemaTVRL4shglvOMVqrU1u4nhoVpmTuyWLgYXFFr7xVhjz2hia5SpOZm7ipHUBl27UUek/g2hvj1DAdhCC8qXU4QE5AXgdk1eZxc7HwG6dGLjiZVzV1G356rsewlYA/chuzNWfxfzrM+iPTGIozUWLIeDOijyf+cbbi8v2kDCmPF/2cfnSNJzSvLAk6ZEEQoYPC00YQQN1JOHCgqk1RRohttd9FerlLE4hqBeWVMvb0qEtFcKO8I7LXe8C+C0V8lBPZksNU8dAfxSRsKnokVe1gJO46zKNrImDB3rF9HP7dkF4/clnToDnxM4JvdH0sLhYQb3uoFp1UKk05brR0RT6+iKStY4T/kY8D6uqf/x1gxFIp2wB7LcvLgg5lsoMqCT4CtMu112R0kktPDiYwMhQXAQMLsJESGmXTTMMFwdjo0mZ5zq/8byD+7OIx9TC7kqxAV3TZbGXSoaFiITPEJU9nusr9XIsJPUy6oIJpGg24PbYZr/BjXwIP5mhMI4+/WF8YHIKP5gG9FAcQwO9aBQXcLM5g4uTFxHL1wFkYIXjGBpMoV6YxZVLs7h8cRz7j57Enj4dixUdMctHAwFuvfpdFIf34sCeIayX48qMp3H0RBqzN97A9YUaooO9qJXmYKYyCEfiSOUSmDRaIHl3CC4ufv87uLRo4vlPfBxOMoqxQ8fQmp7ElbyHlKXD0jXM376Kl8uLOHb8GBLW1kwZbcNDgJ6MiWxWvSwPMiF1S+nCvjVdldX0yGgcmu/j1u0qyg0DJT+BfCWCjFlBQ7NRHn4asb7d8G/eQlhbxFPHHPRMTOLFKxUUiicRHRyEYZkCaMzqJRIrM5HpAfr3GjBtH9PXyQGvAJ9y5Y5K8HyJPR9WyEP60B7kZ48iWp1CMuzAC5azx+ltvxGR3ttStmH4Ag5w6/DrReEX0OhM5zdgaTVJDhOFyv2ukdhGAxp+HNViCc38FDSv2U6MzunmAbbOqoN68U1tywu2TZ3+DjmJ3UwlQijPNiQxSyJhi0S93iRMAKA0tmsshWKhKXb40yf7ldpXg+QMuHZtUZz/KB3SOWtkOIFUKqzS/jL2/gH8Tt4hw/bIm0HNHnNPMGxxcqosOQFIPkQzIjfOA6IR0QJEoxb6kzZSaVvuBx1xBYjXsM+rRdzKZ1lpbgLQDJPLRdFsumKvz+cbKJVbIvFzFctkn82Wi2bLh22RwdNDveGAz9SRQz2IRJiCuENg9ciH8H3ZAHriMzOcafBeNWGnhvDJj34Ql8an8frN62h6EfTE1UIs2juC5z/+NM5PTOPc9WuoOTqee+bj+KlnhnDr8jl85/vnMXX5Lbw2PovYrr343E//FI6NZlFemMR3v/NtzDVT+NBzH8TugaREPLUcH317TuHnPl/B//P7f4E5+Igf1UWKdptNwAogYnNxAl/5yy/jupNEsngF124AH/3Y8/jEz/0Snp0dx7deeA3Td67g2uUSmoGNWyUbP/Phg5B8V5u8qwLuBJ7+HhvxqLaUf3aT1991GifSzkbosWwDxWId584XUfBHUU+fRWwwAT0ag98TQsWpwbRNxHtyCHQPxUYTgeUKK9zB/VFEI1VcufMyZq9GkQ/ScMO98KwstEgMobApTmwE0HhaR3ZYx9QNH65rom/MhM68Nx7j4rt0ap3GbdNnQICPGNCSadTyJpLhloB125wrtXBMmh4TpaqVFxnUlCN2216k6eJMp2LD2VbaDrtNH7rEzLuNCuBUISuYbWm/D5hRGOEodPGC6y6Ud49WLSbLYZvatnhZoKjxVAs5dU7H8t51+7sLe6T7HH9K7cmYjuk7BbGtUxJcD9zZWEp0zBm/b18WF96axa3bRRzYn0Wx2MRbb8+L8xVVudRwKbWs0iZ1JMFH2uH3QOUcU963QqGJy1cXRA1/9swAcr1RWVQ1aDZkOJumIxIxRL3ObtO3guawbtbrzQ4H6+P946KCeTEOHewRDU+j4cJpL9Y4V/K8ak0lxyJXOTWJpM/+wQ+nZIG3fx+fi9AKjcFm2/D4vO0aAQ+Oy2fEgOfUcf3qTTTMGDKDe3HoiR6c7LfRaDTgNSu4df02qqEcBvsGcPqJDA4fGIBTK+DalSmM7RpBcWYeoVgarUYR+RKdh7MgxezA0AgiTgyxMIEGqM9P4CtffwmpQ09hTHMRzTJds42pybdx4YKG8dkm+o71Q2vWUfajeP4nP48z9SZe+eJt3NFiMEOGaLfHb96CH08hUzYw04giqQeYmioIIrSl8U0NkpzLSVrm7E1dsvmT+KIwBGrv7iRevBBBY/eHsOv0MEI2BU96VxMSmMpVdF0CwkGzBdNSq24NOkZHE+jPNVEp11Cp15CvTGNqQUOlSPijciMmTmC
OFkLKjEoZ5bciMMtx2L1RhLO8ESTDIaXd5tu+6TNpB28UEFQmUQsi8IKWJIoh5HHjZNDyDYzPtpCfK4rJwDQNkfr9Zl1+l3ZxDNg+uRGy026CarT8yyK3IT5UCpZEMQbMeBbRsAmGvHXnvCN8kz2PIXHc434rsOVemZIdLxAyHemjeMozfM9corE1Agc6amKTpsrzkW8MhYuYKJQdzExXBJzV4mT9lvH5zWbDGOiPY3y8jP6+OO5MVQT0jx3LSapREtM4jrpH65f0+JfNjgDvCf11KhUHN24VJH87gfbEsT6JOnBcT6YLZojrmIX46jyItnF127g4UGvrQNqyXNfymT094SV/Fz4n9OovlZu4di0vmp4Tx/vkVWZRj7dHMQImxvYewVMpE5YZxt59w3jxO2+gkBrCJ376MxiOunjtwjX0DAxj74E9uPryG7gZy+HDH/84DvWYKM5UEE4O4dSTh9AfM/DVv3kVQ6efxcl9g9KZcDyLk09+aEXHwqleHNgzgJe+9zVcCWXwsZ/8PA726Xjxy1/Cn//F32Do+DP41HMnMHvjbUybgzh79hm5ftBo4KCbw3CPBTQW4JhhHD5zGANnx9D88ldRDPXhxz52BPbWtPLLnv078QxSPeW5LqbmPJjDRzB8bAjRSKvNLCeKc1HVywvQXl0wRImrdvU1kBWzHrKR7rWRhY9hz8cRjx7ydEhz0GwtymrdcQE30OCMusgXgVrDwMJsCDPzo3BGDiOcTkIPdkKKp0EdMKIxeNk9qNQuIGY0YHAV1sFq3UCtXEH+1lXF3NbuKzGPiQWoWfCdBrwgocBR+NsVIPJfXzPQdDX4TnNpQlnxVMkX3sFNgqjketdhxvuQTCcR0etth6/u6wN4TE6jx0Ryp+d/rQm0SgugqYBVabaGSDwOU6c0pcgGqJ/wYcLVbWieLi8W443Z5Uc50VG64nMVCeuo1RzxyeCiQ9S6dw/m0hE6Pw4PxzG3UMO1G3mxr48MJ8VZaysx10sFPt5ZdwR4P/jHiBPmtafa++iRHmQyEfF/aLXcpfW5ItjaiVlrZfOWtTsr61pN8EV1PD3xKfUz3G5hsYYOIdLKEt+/39S7prSAnP0fdFOzVcdbd3VpIZx6+kdxgumhdQP7Tz+LoX3H4MBEKsFwRw9HDh+UCKXQUC/+1p6jcGEi2U4ok8gN4+neUZkzDjzxIYweegJmOAZzAwumbsdx/OmPYe/Rs2gFIaSTKjnNp372l/FMuYFYKgWbEVLhI8hqy7lC933geewJdJWhzkrj+OmzkrRNQxI/84t/B65mImqv5wOn5tXJWQ9hG0jEDcwsuEin10gcs3qIHuQ7J3R60NfqFqK9CdhRqqN1NKpNtMpNGHYIEb4QulJpsi4jYoIhkLRn057FlysUWnaQY9IW3TREMx0KA/EVD4l6YHjTCZy1WgvnLr6NNy7PodR/AvGhEQmvk+D4B+lY17V8YA07AsMOo3GngBq1Cb4Gy20hEWmbxtuwy3AwJcQSauh9rSLWiZSMdVfK7q7CuUtiHLrZNT0EzepyOlc5TT3YBE0uEgTb74WgjD3XTRjJPiR6MkiYdeh+a1XIm1QMM2gg7JcVuOsOWoGJWmUBQaMsjEJmvBdWNEBYawlIKvW9enmdwAZTzPB+GXQpp5bmwd9nKee+/+koR7agSaBURslxoD+G116bFg94elI/+s7c9yi8Iy/sqOEnJ8u48PY8+vuiOH4sJxEJnAPe6Z7obB/bme2J4PZ4CQsLDfTlHvOoLz1sEgrqSZiiMjluUQxdKmh5hwtBRnnVm86ai3RmNe2uJZroSi6jGYhEO0ybEEbW5ZKVP8fydx3hLXDiRxMpdOec00MRIc3qlBcKL2eN4zFhOu38qBvoXj9Y4aiy0Xd+X+OT8+p8wUMsosOOAMWammi3osJfo9iNDwm4uwEqVRdIE4A0VOfmoY1fxADmUGpYmI8dQfbQblHV0/fFoMoaAWhTu3itgN7eCEZHUpLPnJ0gmK4PEmotx3q5Z0fDOHvSRN9EHudvfhvjhTEYu08gkk7fFV63cU82+DUIoJshRAfH0Jy+hYTdQE9Kw/hkBV61gkTEl1SxzYZyjBMRlhJKiCxopuR7D5yGkt4F3ZcoaqRS9pf865ZtoxVOIagtKslZ+dALLawejsFI5MQ5z68u53a/q9VUxesmzPQQ4tkexI0i9Dbr3F3nyoKExgUS8ZCHjs5NIRjhGHynKveAmhn1HxtOHwGOOv/oNFKD4q9TNtLO8bXqeacfo8sGqWp/QDu8ZQjYc+H5eNueEeAkXa04uHItj/mFGnbvTmL/3qy8Ktupbt+e1m5UitIOxcik13Ql6x99PZal/42ufW//FjJ1DA7vEaczIZHiJP2gGymlIw7SdhzRLqB+0GLfbddzPI/uDYkgSIr9PYNqebCj4E4BiQ4x024OXjyN5s3z6Fv8AY6PAqODFkqlKr72/7P3XkFyZWea2HdN3pvelDeoQjkUXAHohmlP15whhxyyZ3ZEzUTsPk3s6mEiJIXeFHrUk/Sg1YNCWm1IERP7MqHY3RGXIoczwyV7SLZhd7PRMA3fMIUyKG/S5828TvH9N7OQVSgL10B330AhM6857p5zfv/9Zz/BxJUwel/ohab5MFNMzhLH1at3cXe6gO7uAbHbblSDbfcChAmgOtshOImOgcE0utstXL9zC2dvF1HsP4NYW6sQyUd2tqszG7GBYXhmO1Y+fQeDsRs4NJrG7XENWbuGUhWBdzyJK/PDh1KIdeyDaYZQtjVU88silW/l3U8JOhoOw23vh52PwK2WpfuariMUiSMci0DVTRSccp3EknPbsHiEsIdgZLoRz7TA1F2o7n0159bjyXICjHlyoFq8DR5TzdKHYd2xvr6AJSHRb5z/vMX2dY3d0w9KlXSoYv4DAqhwMW3NYO6p6C/9zRxbOqddvbYMTQdOvtiJTDoihJ1ak+fpkDnhAdTsEOJ4JVsVDcRXxJ2SMJ1TR9C/f1jMqGvbwqO84Po2R80gnV+/zIcRauyzABkpHk+EuIvkLIhREAjPsFHD8sRl7I9O4BvHVLR20LasI5lx8GIuixvvfoaltlZ0DRiIZOJYKY3h3k0bkcq0IJs9in2GC4uSVzgewdhhHYaxiAtT72AlP4ZI3xCYZ9hjcPMjHZRwNZht7ViN92Ml9xkODGgIm2kJicqXPBSzOcwsEr/dQzjZhnjEh6nk4asZuKl2uFYCDvGWPWK5c9be39iYOY41JMIOEIrDcePSWhIZU3MQUkuwCCwkuwtf8v1n5cZGxrmWHmSSBnTVEi9S+PcnxHbdlzXk013Ohm4YsHVTgHzUUGjNzLDd88/zNUqV9Ja+8Ok8aPMl8MkD4/sMdFBCQ6Ud22m2noGGNjWB+wQ35oWFkoSTnTndLaAx9GV4ggEuTS14/F+5VtLpMGhbzGYtdLZHhVH5ihkMCPw2puPH/zK+5CU+duJOTlxTfXGkKxbooe7hSOsiBqtzOHQgifbOjKCzuQRjNzT09ZoYSk9ibvUA3P5ekYqig/1wkt3IX7yA2dnr6O5qlgAf7o
0x/pBS/OGDabSlCjh38yPc+WwZbt9RhImCt5XYvMvqGBKnKg6MmAp3hbjqQJwhcoqCVAJYHOzE4sIQNM9GPBGG6ZdEJR5WQ7DUtPibO0SUV1MI+RW44nOvwvDLIJCt5lcFsS6sWtBUe81Gzlzzikd2IAp/M1WXEPYIzNYepFMhmIoFC3HQmz2wfe2GwFP57olHveIxAUygjFc1Q/wlnh8xNvBRkB7votsk7JQeGfp2/fqyeE7Tk5tSCLVMHAeaTT5PyYxt5B8d/0hA+GbW3C/IhmxiyqLTF+/hNRJR9uFpH2wzx5IwvUysw6gY4saTsH8OzXls3RczmqoI9DAdNxlNwZwTz5sW4rENSL0gjsv4+G3cvXMjMChSMnkMh2M7iMaSeOHkS4hGmy3dj6Hw57yINeLOvY604WGXOZ/l+7KtKu5MW7g5BVhOAp6WQCSUgKkWMLfgQdeKSNCJjkDy8JHLO9DSSbT01Z1PVEqpKmxbEY/4RoKPxzHO3IS5cXT0pPBmxkLrlWu4MF5CbeglGPGYQMU+bD2KpqJWsOEXVpCMBWFhjmCWE/VMge+6IMqZoWhCsOFTJU5IDAWKU4Wbn0EokQbCMTi+KQuA1y0liYiP4DD0AAAgAElEQVSflWdKSgY1GAj7tXozfUGxs/wYLFcTb3qBk224kZDDMBMIt3QhFQ8hirwQdr5jhr7t9iDrQCjckhtBtbAKv0qHuiC+d7dlPAv3EXuAGpBwRK9LU9vP9oD4BeFwr73aK45ShYKN5eWKeM2TcWOcexDTvB6e9mn0l+uNjlzz8xUsLTP+VhHJl1jpIZ2x36qA6hCRjcS0cTChSj5L5ENFPLyJYfS0JGW2guBWBPlZWi5KQhhm6+vvo19NXfnUaOhz+NlgTKjhIVO4sFhGn6DhcX5sP9+ew+7uusncC2/fvI4L7/17mJobOADv+unNbyTNKVcsmMkBjIwe+Yq4bxgmIe4U7pi9ijjt5Oq3Tji64en6T25y3CAYP3z2so2r870oR4cQbU8hkiSIgA8rW0To5izabt7Di4N5jB2KIJur4t1LIax2nEGmNcO9CZV8DfmpaXjTt3GiYwoHhmPwfPWRFgYnAZPgBOpuRZKtMMwmE3Ohr1YgqWnu732bd3Kns2RKrDKi1hySGdYUFMixKVZc5FfyiLrL0DV6ypNrVUTSZoiZbZUDT/h4XIiuhpoknGEZJO41NQLdI0FX4ComLKgg/nwIVbiqiXwRqK5MA9X8WqgcNRFKKIpIWy/SMQUm8gLuQRW/Rk0AyFxs3ukg8QvxFNhSOo558JUIapYNOz8Pxavb6lmHlLL5phWMN5m+QLokwMjGg2ea9zy+qydxNMpVVFWIHt+A+C9uUxk3Y2qi9u9PCeTo0nIFl68sYHExBjpNES2N3PCBAy2CasbwzKe1gXNMaS4YH8+JE1qmxZTwPDIfC4sVgUUNhTToOtDeGhWizzGgBDk3X8bqakWYceY3Hx5KCzMg8d3bjAef53xujCVvFYZpk/e6WTF8jnj8jAe/ezcnSXZaW8IYGEjXtQ8Pzo/NynnWz5GnZnpf5ha48RkdXIHeniAf/eep5fm8x80I6WhLhRHRHyNxNwEvYoLrev3hoVgsIxxlqG5wpVoqwvY1xOP3veSDKz6qpQIK5SpCkRhS8SicWhUEXNHrTDHz0Qcw6utr2f4X5zOhEm0UStTYRu/vuL6HSqWEUrkqDtnhsIlIOAy4Nqo2JJmRlE0QNoWQ6nvfGIW4U5177bYLXa9gYL8hC3j3mxTVa8DKUg4fnVVwqfoykqcG0dFK7z2GfnHDU+D3p+A43Vi8N4K3x69gfvU2iqsWJrWX0N7XK41fvpuFffscDicncOSMjp7eBEKhvSM9ceOTjYg7L6EgXRfzs2WsZh2oGsNqgJWSivFiF6rdJxCh1L4Xj71N3igJJT3Lw6GapBAVgiVe+wrKZQflElO4Mg6gMQlJ5EOoeSo8TiTPE9Q6MjKhOnslXvJ+BVVEEYKFiJeHp5ggYE9NCQNeEY6vS3Y2zQjDV3x41SI9CQXsJpRsRyKmw0SuTnQCcsv49SqoKWmQ36BDjenDvtT8AKSjRnW/+MKHJURRNWNQjEiQjzkUgQPa3dmvBw9XoZ3eRK3mo2J5m4YzEZWvgfLFQjhupBVsy07EZmONfANcA1vh/DT6RwLFjHxkXhqMBecL5/xm6lNu1rNzJSyvVHB3IifS7qlTTN2oYHw8iwsX5yRBSd++pKhhG2VubN/j/E2mgylLZ+eLOH6sHW2tNC0FxJue/A0v89WsJcA78wtFIaC27SERN3HyxS7BxWeuc4b6DfSnRFO2VRs1YgIgSPJEx0JK+hyzsKlJBMFmY8frFBb4MhtMwfS9Aj69vCiOZkzDSq0Cjy8S0eO85R5Epo+OXsw4x+ifwcG0MIubzbGtxv2LdZ5zKMC85Np51IMlBALK+rLsSg6ffPABbs8sIdMzhNNnTgGLd/DBhxeQ90MYOvYiXjk2HMSVw8f87ct4+9fnocQjGB47ho6oj/OfXEDN6MF3/+BVlJamsVhwMTZ2YFdOar61gr//1e+QGR7DC70xfPjOu5jNVSXO/sUTR5GO0CZmY/LWVXx6cwpOKYfFVQUvf/0VuIsTuL1g4eTrr2OkU8OVq3PoGxlCe3ojQ7Lz6MnK4kRcsHvx7tV5pBJFtLVHZUPe+fEgXefyUhG/ft/DePE0EodGkGpXoGuOeKtzR5ChVwE9rKJzOI1K+ylcvdeJWriK5GA/jLCKlfEl+Dc+wjcG53DiWBLhWBiMNtrrQuAm5NoOrKoN13Xh2C4mZhxcX21B0egGcd2Z6cszW6APdCESY8zyTjLcLkZC+Ihg8hL6ksEIa1OOu6JQv7UzQlZdRZP2uVYeVNPLBihVBfeR/FO+JFIfz/A3bfEk+kFxiqjrW2ImvHibZKSzlsqSIJ1SuxkxxcYe6FyDEnQQLMeUcthaKYd1Kjoqjo5KPgvfrsJHUdKUamYckUQUpuIgYbqIdTPtIZ9S4KhRuETgu1/KuoHiNaZIyBWpG9lEbe0DNccDk4B0t5tC1GfnLVTriG+GEWQOXGvjutIf/BEQGBKUgNVqvoM0hptuuQaEpgrCbDQzsCSMiVgIXV3xdd7wXBuU0Bl/fehACw4MZQR3nPOM6u+DB1tkFG/eWhUsAuLRU4h40sSKxJWgKWZYEwmY6HlMaEINVUhMXsH7JrpaZ2dMrnFUODb0+CdDRXNCR1sEK6tk6pNbSs8cg9VcFRMTWdEWMEkK1f4BiI8PSv+E2QyQ3AIGjeND6s/kUEysY9c8aSczttE/gL+LJVuIe/N7aH5nz/N3vn+O29BgRsZ7/E5W8sszW6CuaWD+gq+OJzMChaVp3JjIYX9fCy5/+hHuLpQQyd6GnulBi17BP/z479HT/19hkI6PbgWXz3+Iy0sGfvTaKQz3Z/DrH/8Nzt9cQq12G0ZYQdwrItJ7ZFeEnXvjhd/+FP/zv/5/8KP/7n9Ar57B5CIw0h/Gr375SxSQwluv7Jf9tqN3AKfjGXzw87/F7Umg/foVTN2dhuuXMbeaw
6lj/aj6SQxHHiErHCWYaGcH7i3HMT59Dq0ZGwoRdHYQQegQUylX8Pb7Ni6XTiJzbBSJdl+Q4Nz1adiFHjDsTFE8ROIhmAdHhKbqporScgHq+If45uFFHD/eKjCmTMywl4NqC4LCLS4UcfG2g6ybgm5GJYRpxU3A6xtFuKVViiQ2kPCP7B8J+w793E07RK7RDFS1OGxnBmHZ8oW6gSodMhqy30lhAbniMwRaaFATeg5TCm6mlcEWzVMKbDUwUZheUZKtaqhKOB8d8BwtSnIszJDi2FAMai98KLTtC9RswCsbsADJcyR6BiHUDLUjKlDZiaKWXYJv5aFommgz9FQnEjEVEZU2eke4uYApUFCBIZj0us8MS0Ff74+VD5dgPnocLekQMkldAI3W3SbEHcjma5ier8r7K1kewoYi2AjpRFhU6LvU+taRCgNs8WbJgISdw2xVPdTyNWQXclDtOiofJXbPx0rOQUdXWjKK8f7G3Of3apWJZFQhkgQmKVeCyc3kIiTkxCCnBEqpnm9qYH9aJNa9ah7uj93O30g8kglDspgxsQkPsmr3+dT6HFMCzYgMbr1YTnc6mHL9trZFcf3GisDr0jZMBrMZMIYagorl1B0KFbS1RRGNhYQ5IOO9slLB1HQOU9MKDo22SuIV2tMZ3kaGiL4AZoigU0Ri9DF6oAWJpIGpqTxufLaC8LEOcT6jRuGLdvAdcQ0yAx0zEbK/Fy7M4/DhNskgRwLPex7D9vNFG7pH6k+8rR9//Cd9WP3sffyuaKHNLWB6JY9vvv49nEwWcfPsf8LkckWIu2eVMTt+B4V8ApcuXkFIOwIzGoemr8JQdcxOT6J73wBeGOoUzWqQ/2Pr5i1PXMf5iVX09OxDqwa0943irbf6Mf77n6FUdZEgOh4PRUOmtRNRL4/ZvIJv/clbONFqYfbOuKB7qpUspua7cOaFboRVro2GxnfrujdeCXRiVBAbCszeQUwsXMTBgoV4KrFjZit6DK+u2hgvtCPzwig6+lR4ttO0wWysrk5HxVbrSTKPWsVH+c5nON05g2NH24SwN1SKDz69/gw3IpoEJAOU42BxroiPbukYD72IUEe/lE+iY4RN+e7Tbi0rqUEy15f3KL+o1lfNCJxYGyzrBhJBtJpEm4knsxvYp32FcDCir0TVj6JSLMFn3LpCBzXu0A1mIyBPJKT8E1u5eMxHhaArvi3OdFCpPqfDG50UASOWZM5SwEhA1Uy4wjCw7CARh+rXxGZe0+IwEi1iDrELeagSgucFtitNW3N4EZQnIXZsHNVJ8k+IOSPphEEJsOg2DB+JrAJN8RGLaEgl9E3nEzM3xaIqJqbLmC842N8bRSquYY7pGdIhhMPE4d+t7H6/CXxCiLp4kvvIFR0Uig6iYRVdrRriMUU0BZQwmb41V/DR3hEVJoAb7saDRE40K6oShE/KphyEWZLADw6kQRv39c+WBQBxeDhdxyffWNLj+U3mg5IhndHocU40RkKgblw7nO4NRmVjzSTOTMRCyfr2eBZMnbq/Py1l0gGq8VwuS4bQF9MDM9+xDqlfUUS9ni8kcPPWihAvIimSmSUGP5mE3u4EWtsiwhwx/pZjxLFkWz/66B7ujK+C6nlesx8m28vGTj1jvzn+ZFyYaY4ak6vXl3Dx0wXx48ikwzIOoulxA6b0K0L/6C/QiCTQFgEKZhTxSBzO4jTyFRuKr8DQdMQNVSK2WJNixPDqd3+EMSUMpXAPb//ibQwcPYW3fnAIi9kylhfmUVyZxf/313+N1sOn8ebXTiHFlNHVCpaXFlH1Qmhrb5PkMcX5W/jxf/wJ7M5BHOqbFVO1EY4gGtZw10wgacyjmluGi/Y1FLqL77+PhXIIPzhyAIMtPr5nOZhdraCwuIyqX8CH7/0c759txbe+/R2MdCX2NDh14h7YOaMZHd6yIshK3Bi3PajmdD1UawriHRmE2zR4XPSbMuCBik5cmBr7psLwHQ3V2VsYVC7jxNEkFM3Y1QIXok5PYcfG0ryFbNFHwQJuL0WwmDqB+L5h2UAaADViW10HutJoxLY93NtFxoEzBaQRR7nItI/cVe8PIjUFxFxn6laqVGnzrVCSXLkHItQpZgwa48Yl229QNQky5Ws6z1WVOCJeFo4SQlnJwAy5CIXDCEdCdduRghS71ZdC1VZQcxUkCX+rBn3lBlKuaihki1C8Cox4Gj1tKohotGimsbwahmtROt84NmwDW/LglfsDxKv3+9p8nqWRVpJgbkY0a56PiKmivyeCcsVFOhlCxQpgh3k/CdBmz92vY/Nv1JLQ12O1aGMlW0Oh5Ir/wb4uagPqqULrDE+x4iGeiqOrM1g8zRssCSiJUaFYk9TFtE/z/YVCQShXo318hmlf2dbJyTzSGVOwxRkKtfGQtbXGJG28uvvf7CO1BLncAq5eXxa4VhLJ3Y4XCTTX0shQRlTqn91aAZ0GDwxn0NkRg6rTkRWiTo5FQ4IeGcSgB3OEjrfsSzppSmIXqu2npvMyvsTfJ+NgmgwHDRgMlkVGl+4tdGgdO9qGjz+Zx40by5JelyFjzWO/+5F49u8kgacZ5eSJLty4tYJbt1YlaoPnUglDrnFuUTLkeuG7+aKOxZN9Wz7yi9OYXlFx6MVv4PTsCn78s1+glO4To2beqmC+bOPomq+HBzXWgtHBQUSLBn7+q09ghdrw4ql+3Dj/IfKLKmbvTktejalzH6CjpwevH+1DpbCMcx+9g/lKGt9882sY7E4hu7KClXwJscQSbl2/jpzej89ujsMMp3HyzT/FZ1f+Na5euoyvvXZIPJ7gFXDps1m0dB9EP5PGKMDhUy+jc+I63s9lkb17C1ktgtDqXfzs11fxV3/xMsJ7EOAD4k4vWttFbW4KfS3MTRze0dbNVIueU8NKzoUTSgQJUbaajYoCSgJUbZLIiHpe1WAVckisXsYrh1Sk0rE1bmq7l0+bIpPRTEwWcXfWw4yVQU5rA2Ip6Ps6EU1mROXuN6n5NpKr7cp/+Gu+SMFlN458NUyXNNnUgvJokKd0baBU8VDJZcULXTQJtiUiphoyROAOHBDvt4KEVUcVFTWNoqLD0HzEYzGkY74A4YTUgPAK6WVUgKYgW1FRrinoSgRe7Xwt3GCtsAdTjwFKVAh/JBQkgOlIqfCUKEjkGK/POnd3yC60u1u3uYvti0Y0xKOcjlTpNvkCbPPcZpfYTxJkbqaLyzUsZynRahjoiwjuAJ9pED/ex1zhyzkHx14I0nQ2JF+WQ8JHaZL5u6mavjdbRKXqiHmBzBthkekRTQLPzZjlksDPzhYldpsEkowBrzfK46cMryzS+/Hpm/Vlu3NsO9s0v1BGrlBDsVjF/v4kOtpiqDH8ZZcH203FCNuaSpm4dSsrjMLcfAn79yeRTIbFzl6jAwzVzBvYPL47jlGQHjeD/ftpegsYH/a1EX+/sTlU0WdaIuKpPzWVEwbiyOH2NUZg4/1fhN+cWxyno4fbJHkRTRqLSxUBu+GkMA1d5hPD6HifeGevMUZfEfvdzQEFVm4O//Tzj/Hh2RYwy+jX/+jPUCvP4ewvf4az
vg1tYBRDaRd37oyLj8qNj3+Lv/v1e2gxPPSOjuGVY90oLk7i+s1VvHDmJLRqHu9+fBuZA6NryWDimU688e0/huPRdyUI4+4+8CL+m//+KOzaMvLjk2g90ge/OI2f/uQf0TnYhoLeglNjo8hN3MKM0YbBxDIKbgit+4YRachG1Syu3RpHrOcATqd9/N07n6BgJDHWlYHeuGd3A9HwEQg8yjVrGT0HNIQjBqwd0lhy4bqOi5W8B9tgGJvMwgerVVRYS/OoTVyHG+9DYmQEdMiR/MgzVzHWlkNHZ0ac54Jd78EiGmdIuKrlGs5fLeBqvgtWagh6ewfMJFXQ9H6mStsLWN/GQ0/xkxtu1TdRrDKDTyMWvUmmVWi71UDPdq9WktA56TNDCY1oELIhqo/mt0gC6gpcbDyRQDpqI2q4ElInXWumw7yV6mJqT7z6+6hfpzTA+NKO1P0BEe0z8fwVD51JB9WqicmlOJxcGepWjFpdRm8oaBgq16BX90ve+zdWR0dHIQhMqEOxdI8HH2EMdTbvYGbeEgmou9NEJkWP/gBwZn23fFQtvhNDIE/5/niwDXxrtAtP3csL7Cw33pGhNDS9HqO9VAY9zY94vjjh0c5MiZ7MJ5Oe3LqdxbVryxjYnxKnN9ZLqTebswRHnR799FpnStmAMdhbZ9mfXK6GW7dXJd0onf1iUeOhHLU4ZxyPxEXD4UOtaGuPYG62iEuXl+pjEWglKFlv5UdAxobjRrU7jwYDtVWvWA7dSw4dbBHpfmIyJ2aBRNz4QkuszEFP5oeaEGIk9PYmxT+BaHaFQhWMJpiczMmYMBseQ+p4b6Bt2ppZ2mqcn7XzskYbitxHbBznG8tr7EWN4toHT+CtH4Rxc3oeyfb9eOHIAfilBZz75DJyro5DL7yIzngIE7ki4qlWfP0730X66k2U/TAOHz2C7owBq5DEi6+cwr6+TrQnTHT0jaNlYBSj+wK/LVULIZnKNKqUT+beiOohIBbFX/zlX8JId6CnNYqQGsbEcgEnXnwNhwe6kVuYgi1hQh34wX/xpzBSXWvl+NAwePAYIq09yGh9UJMdKGkpHB8bfkji7gNGlDHuYdSYWlR2wO03V6p7i5aPmZUI9IFIILlvGGXGHtpVB8rsLZxO3sJ8YQ53r9VQHTqM2sochp1JjO4Pi/MWPX03O7iJcSMkYa9ZVXx8IYdzhQMIHz6FRCoOhVnOqHJ/xFC2zere0zlqJ2wfnlVFOORAD2lQmN6O+d6FsKiCTpcyFcTDJipOCsVlA06e1mUfmhGo5IQiNVXs+CHYfiCxd6QcxI1GyFzTTU1fN9B6ERIb5+Sz8aPpGRL5kOqhp1WD1b8P83dceJXVpju2+7pJgdvdvotrJHY6mTWu3PXUeMunA6YAWFisYn6phnhMEw982ru4oW4kNkLAfSBb9NDd24J4nD4BwRwkgSIk6vmL8+jtiaOrIyY44VShcsNl+3j+2vVlXL66KNwAs8bRxsx66JhGB7s7d3NYzVbE3soXsbRSkegIOqQViw6sSla87QngQgd3tpPdZdu4+jZr99oAKBBEt2gkJGFwQ0MZCW3b2M+1+3fxpaFh6OmMS2w8x4De7pTeWzJBaCT7vtXBS9td3/gc72edJOiGrkq4WCppBMEdG2/+Av3mGIkbSZ0hSsRDEm1ALUehUAMdJOm7kM9XMTtbEIaJjoxMosWxInPAubbNq3gmR8txbOSLFVQfM4iN5lvr5h3Nu/2jx+RvbSCSHXjlW2+u/eSX/fv76r+jeOWNIE9744ZwIoP+uok71dGP19/sb1zaxaeC4YOH1+4bPXEGo2u/gEz3fjTYgpHR9XZ0xUygZ1/jXBIvnFmfM76pmB2/ilqem4luqPCrmqjHOPkUEqXtZo8foMutOJ0IJ+tOXRvWPZV4bs1GUi/i4IEEToQ1vHf+Ai6eW0RrOIvjRz1EknTce5Cwi2pPA+xqFVWLoTQObk3WcKW0H5FDpxBNRuHVrF0rkHcciUe8gXCeVs6Ht0qsdwsT9yg5Bo5scwtVAeKhJzxD0eg85+k6ilqoTryIB8CANw5gYxB9OEoUSjiFkEZtig5dccGh2u61NLrRKKXxe6dPRuYQXKK/J4ZKuRurEyUoTuAFz3fBv8bBb4HpZ6+1NErY+ZOObmLGYSbbHaph08igzM5XsbRSRXeHibaMIWNKc9BmB/tQqbqwPQWdXXGRjKjKpxqdseH0bLZrroQyWVUH7nJFtALElycB5X2UcmmLp0NbX93ezjKoUmVIHZmByakc7k4EdmgmFOnoiIAEmUSNmgF62K+uWgG0bUgVrHVK0rTp05M9mTDXMSayNlUyki6m7+XFzBWN6bJGtl2vmw3CJuc4t2iq4Pvu7qZXKMcvMC08jvI3VsmxpFRKEWx+oSSSKr34H4VJ2VjHs/yb4825wGgazuN43JR5w/5T02NZLhaXy5ibL9bHh74cDD0MixMiPe53sx983mPA9dzRtQ8rB74uZlOaHB7HkbBriMXbEDYfLlzscbThWS2jbnNX4K7eQ4++gq4WBoo1HI42bzYXvue5mF/x4MU6YEZIpDYh0JqGasmC4VvirR7PxPHaixX0Tt6RCdzdk3hA4BZGQ1fFrn7rdgF3FzXYRgIVC5ivtUAbGUMkGYZn31d7b97Kp302sH23t3TCz6Zw49odmExEr6goWkG0GxzakhW4viZx1k5xSWzcajQJLWRA8WoitbiKTv0bXDOKjtYIkhGOrS/ake0WMgkWTaN0qNMDx/Y9DQLLjpkuWlrjyC8k4OaZ2tVFxdXheAE8r9hcWRHngBaCATIAj+9g0TwKZQeLK9X7gED185t9cM4wzG1l1UZXu4muNlN8PCj5bnVwDi9nbaRaUhLeReaG50iYiZ7GGOwTxztlni6Nl4XYhEmE6gc3ZN7bkomIZ/i5iwsiObP9B0epCTBEeh890ApK1Rw3MgRsK49IRBXmgMzHh7+fgSC17Q9ATuiER+CjmdkSurtjspETzZG7OBmEqu1JG+khf+hgm7ShOXyt3sRH+iAhp6khOBqfj1Tkpg+TiMViIbS2RnD7zqqE0nV1xtfC8Tgnv1yEPnA45DyhBommEoYO9u9LCpTt3FwRKysr4mW/ryeB1taw2Oa38m3YdNA/h5M0eY0dP4GjYyeeSO0Nk9oTKfw5LVSIOxdPvzGPowd9xOIMddl+MVNbSuKTLXhwQ8kgVttfH9iuqBrsig1/7ha60yVE4wmJF45ETRw92iHcZnO4DcePE5p/tYqFO3eL+O2NDKzu44i0MGSLWdZMkXD8R87i9vjfFn2YQmEFlaiByoKGtpiDcJgDxUQ6GkqFMGwlJX4Btm+gRicrzYTZlka4tRO6EULVK0uSCTNsSDiRpoUQNugE54lkuh1hb4xfpaaiYqvoSAaZ7rZ/k+vHgbSQ2oX2tI6V9naslLJwy1kU5mzAiEAzYnCsIuBUhEmJtrTTvHRf2bCuuCB8T9d1Mans1Pa1R8WEAawsF1FYcUSC3elZjrJVYypdA2Yvbdjbaze4ERAO0vZ0HOzPSAwytUckkDc+ywrq3JH
DrTh6pE1U4wxZmp8ryeba25uog7cEEhft65yztArRjEY1Ku2mRw63iUZKNmlJNhOEIor06wPFog0CutA7/fTpLrHnx2KG8Mi8p1JxMDNTwN27eZRKSyLRUqK+O5kTAJlIVBfv+EZo2tr4PYdf+L5oClhaKuPatSUZG4Li0NNe1/kXjN1z2LWHbnIwh7mGgj2R2gw6a3Z3xcXUw2Q7xK6nTZ5hmGSQONc2Y4R4fqc19NAN3cODZHC3Qo/cQzFf3brLEahL7j56usJobyd4SHXnRwXYwxHHMSMZEXVl8+QRwm57qI1/irHoTRwd5T16oH4SW+KDVcgmGFJwdzyH9z91sax0I3TwJDKdnYLeFuzYDLXbvSfwg7U8wTM+oJsKctUcrIVFHGoPAsgoqUcMH53tBuC3CBGgI38y48PviSMSVuEqDIHzEDNiYg65v5n5YgunVLmrQwEYeaWpHkKStWxXT627iQqYeNhDZ1cK+eV2uLl78GsVaLEWhDNtgKminHNglwr1RDXrHl/7QYLLvhO8hPbzvR4GbEQ0CyHCnu7AoQgLxdDIBlbANpXJRuf5WMo6yLRnZMMkUadUnc3VxNZOLHl6wTPBCgkt7Z2tbRVMTGaRTpsikVNyb0idB1IZ8dBn2fyjFzsPfufBOP1cvopyyZb7mC1sbq4k6+bASEakcdbvNEV4MEMawXFicVPgbQltSyaD5RimJhoFQ7KN7XZyBG15Fv/n+IcjmjBTTLRCx7L5+ZLYnwnUc+RIu6DpbUa4nsX+PO42rRH6ujMeoYZbM2Esr1oSenj+wpz4eXCOMFkQ7+f85NxtqL85FQOfjh0W0+NufL08tmV6ehqTd++IlrfRrketznFdRGNxHDl6DJHI3iFaH7X+Z/n5wJyLarQAACAASURBVOYu8qUI47tsqy9ha45vIJIOQVF9oqcGh2BQqyhP3saQfQWvnAojkohIaNKWhYs6FFiez+PDGzrupV9Gcn8/InEdilvd3va/ZaFP+QLV1JTGrSzCKpHKSJSCoDJT9xFOBCr54HzgMUsUQCL2LeVdMKSNcenk1ZuJ2Taa5fUdVIBcWUW2pCERIeTNwx1c+pTeGW8ez7Qgm5uH5zJVjYKw7iJieoiaLShVW+FrBlyUJFRP+vXQtT7YVhK7EKFUdyG1kYjSF0EzNIQ0enQ/WF7jDLVOhMPVTROHD3cIrrpVR5yjvZze9gSgoU0zgFcNQusGB1I4e64sUvnoSMtaciW+Kz7HgzHmQbhnABfMc9zEZucK4j1PlDJeJzEj89DTExeJy3WaVeBBS7k5U4xvb42AqHgkev39SWEuqPEioZN7gtuf+/8ZaRCPmYK4R+x6+jaQeNFEQkKfSmU2lUqf+47vsQMkktSs0oQU2N7p10EP+zyy2aqgFtJhs7UlIr4MjEKgDwmTB3V1x2HomkSlbOXAvMfm7Pp2rsnPrl3Cu3/37wCvCk0L5MpdF7DJjWRYKpaFlt4j2D8w+BVx3zBGayO8zX644RH+DGyTplYWAHz+bhyKoqG8vIKW/GWcGdPE8c3eIayOhG1utoR3zmtYSJ9Gx4FhyQnvOfXsY43Cn+VPqr4IyKIWETfKkiBm3ZjSC5rOSYSEleEiNGzAUNFOzsxFsp+ve2h3HWZ5BUvFYiHwqhcb/SOo4rjRRkMueugT4QzBLhehGkEsp+pVEFV9GFETNabm9WPwFB2EsNWC/Hp1VnF3bd/qLg5DILHsTqXIeyWBbkNc3qRgEvay5WFy3kYkYeD27SWM31lCd08K9HYn1jqJ5vR0XsKPWBTLJTEmwAiBWRYWS8gX6Y1vrNmF16oi1KuhiZczIWoj4ZBIS8vLlpT3wvGOANeI4YdaoJFoMAZrZTR9oaSl6ap45jPsjihww0OZLxRRb+ru2njSl4EZsmLxEJaXyqhYNXkPjffR/MyX9TuJPPdVQlZLuKWhY3mlLBkCrXuumDSIApjLWcIIFPM1zC+WxNu+pSUijprbLJUnMKwUflx4tTw0tyJw14+jEr9qwa2VdycA+gQSI8T3ZjUHoayPx3bPiCaaY+9XRIdJhkFvdmx3bbP7d3tujbjv9oHgPl+SwrhuCI4dpDFhN8hNMiOVtzKJw51FdHQkhcvkRNzqoAo6l63gt+cUTCfOIDM8BFWpJ53Z6qFn8TwlR6KWxUKCRAe/2szzrPnA3/8S0ECb2VV8RdTvHMOtR2rzTgvBshXkyhpaYi5SEbcBVb/5A7s4y9dFjUJ7WkU02iUTtVL1UcjmQQRfbgqaX0UEVTBlbU2JSmpapqo1fGa/a6hxdlHZJrewfI6Lt/la2OSJ3Z1iuSLxegry2QpqeabBDTztCRxCz/SujrhAyHID7O5KSJY3ls45TAbg3r08sqsV2SQ31sqyqUZeXCwhl6sikTCxOFtArlAVhyjGiYu0XW/HNstirWgyFpS4rJor3vUMSctkomuEcO3GL9AXjiOBeKjlIIGiGYM+Edx4t9tLvkBDsOuuNJxGqQXq7Q0cERl9QU0PN6Cxo+0CTpTLVjG3UJJ0u8xwODqSkWiI7ZjLXTdilzfy/TEWXFNcME78UQ/ul7pOGHO9Dq60vsSlu9dw4doMjn3j69CXx/Heux+hoKVx5vVXcXBf29rN+YUpfPDeR5iv6Hjpa19Dp5nDb975AG5sEN/7g5dRWJjGfN7BkSPD2KnVVm4RH3/0ISbmSjh0+jWMDaZx8d238enUCvoPncbrp48jbgZE37UruHb+I1y8NoWOkRfw0tg+3Pj4d7g2Z+P1b7+JgYyP61dm0Dm8H+3pOib9Wqt3/vKQxD1IPFGqMVSGA1snSYy/pGNSuYyWHgM+F+MWoUhsGpkBvujpuSqmcASp4UHoGlHS9kridu7o07iD/bHcFPxqAh2JgoxL4EMf1E7iErzW4DcJc81mrnPAZMBBQO/XmrrbUbAdRZIasAA61Bk648R3+/Radeu+kPAw9j1DFb+qIK+rKOeDNx0wIUFPiJ5HeFwHBmwlKkh6hl+G4ZXWlbfrH74vSHXJdByFVRshLUi4s5vebAWw0qhbwv3CGkb7TSGy0YiJpVUbmkR/BKpuZlAjOhvjjDcehkEziiLq+s2IDMeMEr5pMuIEoia9cHFBbPZEgKM6dbPnNtaz8TfnSUvGxMw9Ffm8jdbWwIFv431ftN9cL/Sip+aDYXJPM6Xu8zaWZAI5Xjw4ZvxjeC3XBDVxBEsixn/VcvDB72fEmVPuUXcIeX7eBqLRXiePn/67f4P/dMnG/3jsOGbfO4uKkYJRWcJ/+Ju/w3/93/4LZCIhwfP4zc9+jDvmMA6HC/inn/0taqoDK5tDoTiFSEyDWctCSQ/uSNj9yjJ+9+vfYKJsIKNZuHj2HIr3Qvj47AJOvtKHC7/9J9QsDT/89pgk0b7wwXu4eHMWXZ1J3Dh/Frl71zBxdw6OW8HPf2rhpaO9WC6H0H344cL8Hso0K9JixcVCIQ5XZTxxfeslJC1hZsslsSGLInqbXZmqyZWlIi7e8OGm+mCQo3lWHeYak2bLz6CjVT8MyzfXEfHGI8zVTg/txp/ja6i6hJmkQ1
ewCBuS5G7dpISgmB5SUU/szrmKJip6rvPGYm/Uv5vP+v4gt7JH0p56kpTNng+YF2amtxDxs5KCljj4lOYDl7rADCGMTd3hbLvvrDMaUbG/LwGmrbXsnfvBSUybu6jciBu0WUPr51g3oW6ZyIZqM258zQ+QeNNLm3Nz48EQNV6P1YFEmq+LVKKRUQuglpeWKrhybRGptClwoyzzoQi7qkgc/eUrS5LUhkAmlGy/DAcJFp3HOtojgjswO1eU8DB6jn91PDgC3Av4R+0Q/wgLzE/OO/pp0GHTMHUcPtiGpWULTFMsa/RhNooHq392zvgOrlECvmuhva0VmpHA177/A3z/6yfglrIo1+ioHMwh4rkQKrFYzCJXyKFs2xKNQn8pTXVx57MbKDpRnDpxYMf+rdybxke/+xiLuQpibX04frgbuZk7qLWP4Vtvfh+9LR4m7t0KsEvtVVz65AKu3F6BbsZx7MUx9HWkQFM0CM2+cg/XZgoYOjCA1ujDqTD3LLnLxgymyPThRhOIcqPjjKpDTtaW59EfXUAqEYJPhLYHwAGDMeLmaZUtfHDBxR37EFo6GQtM7+Qdx/CZvYEbv27o4iHe3EgyQ5atYG6Z8culgBliP1UVnh5BIqIjS8aoqe/8rivEjw8835uvNZfNR0iH0tFg7AoVoOY+qAVofmar73y3lG7Z3r0dlOj5kI+QXxaNBYm7gxCYBY/riI5leyFuibiO9o44lmYsRI2aSCFs08ZxYM3EEaj5EfS2RgWfnyluAwz0TXpBNTw3POljsBuy33SkI2Hmd7azQaxdNyiJvysVW0IxCUIjNnNPkXLIJNAJbPxuDgsLBcGW10M6qNo/eLBVmIWHyXgmjILjYXKaYDWKZGVjuByJ3pfh4F7AuTMykhF/FIZ+0bmuh5nmWh8OtvfLMG5b9ZF7Bec2wygZjSF7+SP45mxVz+d73sPMjfP46Poqznz7W7h9Z0b8n5KpJPJT0yiUquDekq16iJuaoHSHmY/k9jwmVRtG6wF87dUxzIzfQM7SoJRzsEqrePtv/wNaBg/jzMmj8pxkv6jv19z3eZQrZczn8hht70JbGnj/dx+ilFuCORIRKmgmYrK5MmjcqFWwuLoMN3YE3e0ZvPf+ORx56WV8840QJhbyUJ0aHLWCK2ffxfVraZx85XUMtOxNNb9n4s7FxjzWU3MhOGYGRliF7wXchl0sIZW/irEBD9FERGK1N3vRspkqPmZmy7hV6UfLi8eRatXg2o9mq92srqd5jgtH8x1oa77UQe3cmAtlYGJ8EbXsnGxY5GIYupXq7kfbUCtCjGVv7NlU1ztAoUoQC1/C2urzR6g255QsTCGn9TpI+LhQaShRiXgXCKRNienWhkKeX/sVfOG5bFlFsaqhPe4gHGIKyg037fizQeAJbOPB1WPIl4AbN7MwQg9GYzSK55wKlke9AtnUFZTKLvKWBvp2sC38a4u7COl1mFb4yJdV5N0IYskEXEXF9FxNJFxxZmlUsFm768R9Ne8AYUuIBr3dGcZGr2PGV4foWex6gj1AaXF5tSxJZIhKx4PXCC28slzBvekcysUKDNWlrQUt7S04cYze+PpD4b03mkzJi9kXu7tiYucnwSNjEcyHYNQ473g++Nuu041Sn59PainYX9qI4zGmt63g2o0lCWEc6E/JWvqyaDIe9a019hCOITVJDPnkPG9EhTxq+c/E876N8euXcefuHEK1ZXx0aQodr15A6kgfurqP4M/fsvB//tt/j/PTWew70o7S8gx+f/4eXvvn/xLf6FzB//Jv/w53sq/iT7//FsYvf4yPL+cxc+szLCwWoU7MIdrajpdHu1FYmsa7v/0NFqw0vvHm1zHUnUI4ZGJg3xBeOHkahzPz+OmPf4WqCsSrFjzPQSlbgJLsBTOPIGSio6MXHb1HcWzsEH71k3/AvZyLf/4Hb2Js/g7efudTLE3cxLxFYDMF85VW/Mt/dhLGHpRWeybu5FLy+RoWy63QO1uhcqMVdC/AsVwYZQuZhCHOMIRG3OxgGYxXv7fApN3diKXD8NalZN3sqefknOdBaaKKJLI1V8NKtgw7vwjFLorahYNGwFnFsRAJ1dXqdTOvSPo1oq5pyFVUQZzTVF+IGsPquKUznt1171NFnqOrAjUERKdjjgAem231knmO8nXTxVJNQdFS4Tj81GDoTl0WD8Zd7m1+YJvXwe7rKCOi+NC8COyiDU+hZiFoU+NRGSYyIU3taFzjJ5miliihTxWslhuqqSBRDXtWqirQU6149Vg/MilDnK6oOids7G4OtibVwdAiV+LbiWVPQmqYKgg5SzUwCQeJp6IFiWTKRQtOtSYqfTabDFnNqgpR724JIRoJi88Iw97oUEfksEchPiRsHIeZmaL4AYTDIdEecKNmvgXmMCCOAD3M+cdzQTzzbkbg+biHDA77y1zzRGqbmMpj/G4WxWINRABkbDfv+erYfgQay5dRHCToi4tlGJw/dTOHzPM60EzDP0T26saD2xf/bFxVQjj82nfRfmgVNz5+F7NZoLslggvv/waItaI1ZCM+cAjD6RCy2TxcNYajB7qxdPc6PprJI9HWiYHeFKrZWVy/vYgjJ8bgFYvIFT1EYjEJJWRHzUgCQ6NH0OFEkIoJuUZrfz+OHRnE+LkPYCcrGDr1EnrSCs6e/QS//MUC5sphjJ0YgVvMoYwwjhw5hGt3b+P9D4qIdPXjyGA34Fu4cn0cZscADhsVrN6YghYyEY0wp8DehnjvxJ2EpUZI0hBCMSY7oWRBlFUffiiG6ZUU5paKaO0MHDw2mxfENMmtVnB3NQW/t70e9vb8L04qcEtWCHY1UHkJZSWoia2ikLcAuyxgPsFbUqF6noT5MDwrFbnvCcx9ilETyWiAJU82oFJV4VYUREKBJJ+3AhaOYDUcOb5311dQqSkIaSQoxLV/8OACpic6GQCq83kPiSwJb1vcgaq4WC7qyFc0pKKBfp/shBA5jwqlRm0Pls0zNvlSPSwXaeeL6T5SMYbskKHb/Jmtz3JT90HGw6wpyMQ8GCEyk4EqXk204cyrI5I2lO3iXOPfnjZ6QZfzxBG00bNAzRbYKS3LEVNFqWTDqa2iNQG0xtx1dWgxDQZVE3ViT2JTLtUEd57S5sMefFck7L09SSyvVMSGWlmtCg45tWdsL4k/Q2xI1E1TRVtbTMBuHrbOZ/U5vlcybnQSC0K/NPz+7KyEG44eIDjU7hi6Z7V/T6tdXMc0aXTlYrh8eVFMSMND6cAsVc8nwnFOZyIgSBIFNF3TZH/ZSljbU9sbi5Sfj+NolLdWloqWjh75G+juxOCxefSPHYHbl8T5sxewUovhj//8RxjMADduzyLd2Yvv/9kP8NEHZzFTMPCdP/4BRvpiKGcrGDl8HIPDPeiJR5D55DraRscwNtAhNZmxNA4fO7VWK7+o0Ta8/p1v4dyHv8eyncCbf/gyuhMK2uPv4dO7S3jpD/8ILx0ZQH7mDlb0Npx442uIJz7G9ck8vvbd7+DIYBf8ah7t+wfQ37Yfrdp+aKmPkNda8PKpQwg9aeJOUkIc75JtQ
icIS4OE+D7MqA4vNYBbE/cwMmjBiERRI8BH03vkZlWr2bh4pYgljCLRnnmOnejWvVtxtmJqGI8pS9dU54pIXJUSMQE2GrR9uJUySDjslCGJYxpDRek9EfbWho5zmFJ5qaZitaIJQ9WZsmGS2EnoGnHlVaz4KqKmh1g4ON/cwoABAJYKutj9M7FgQ2SdZBXIqLHeiOGK9J6MBMksWAbv8SmmipqGd2+Yab4HR4tDjaaRiodEXbqY9UGIAwH34fONzjU3aofvrMlgznqGx3lBcp1cxYMbbsGpl4YxNJgW9XizdMx7+QZ2ro5siwJVVwWAhk0J3luQKGdmtoDbd5bEm75YqonTYmfGlLSvG/vSMKmQGQmHVWTLNbHRp5JBkpgdurnp5UYd6XRYwGv4W5gsmnSorqc63vMFSY9ziLnBF64t44Xj7XWtwc4jsGnFz/BJiYhwIWYKwq4yqQpDwKjR+LL4IjzK6+Ec4h48MJAWiX1mtiiJjShMkMmnVpbhcamUKbgPJPSGrqOzMyqZAWUfeMhpRQ1ZlVovtwpV2zOn/0C3uVYtq4pIrUbW/oHrRqoNYy/UQ9469+Obf9QrjHqIKRjhY+TAEFSVPkExvPHtP4Tjq6LJYEHRVBsOpIMiW/pG8N19wzt79gIIJ9rw6pvfgcPwZkpQAI6/9oc4dNpeEwBC+4bRolDLpuLwmdcxfMJduwYjgf0DyaBiZPDaN78r/kwbdtv69e0/9i5WiGQERJImENGCDV82fh8hw0NqqAdXzvej8+JNnDmpwjSYxKOxKVGVCExNVHBushfK4WFEooC3A8jN9l14Nq4qRB5jrHt1HrqfhUNCJDQwMEF47nrsfbaanpperYh8voxaRwQRfT06XWO6BgSHSV18IbyVOsNEWFsSY94XqGxonwwwzkWibxTQNESUL9NRB9mSLhJ8zAxw63kLF74QFDIn63TlXDoKHCUMX6Fsbovz43oC70MxImjLRNAaq8nzqqKjYivwQD+EhztExa8FjA61FTRFqOEkTr40IrnAGXrZTNjX+rHJYt+sBbIpbBgnYQ4UX3Kvry7l0ZbS0BLlXNZlY9xJSAye36y2hzsn6lKJNFAEZ52lSE4iflEAwgsRxY6od59+Or8WY892yPt8uGqf2af4vqldIe7Anbs2qjUPXyGP7v51cfy4TwwOpgSoitoprgNqZCm4cdKQsSVTnoybgtPA1MYnjncgk46I+Wv3tQV3cj/p7N6H0TN/JGm61YeApN6sTgqKqdZumLvICkcIdGbvrLcIzHvROFSdSbCajo3UNNhgm27Y+qui6Q+EzTU0e3yKcfn3D1XMSmu/H6gnEBTXru/hS3Mtu36Me6ERAbyNEf2+h3SXjtqB43j7mo2l3DSOjJTR0RHYDAgPaoQYdmDDSbYj0Z4mZd91vc/qjaquwfZCWLoyjsjsbxE1c1gohNESdwnFvvXBCeTaKOZKKFXSiCY3340btKexUceM4AwJH/8aR4M4Nzb1xv2N6/zk7WQS6L29WtJEWmdcPO/lcwwpo1kgYtTjZvkAw8cMCNxlpRpBpVBAyF4VoJsG5FrgoU6TARmUoFGEqmX43zo+obkxu/zO4qjFsGo+VioRnD7RJ7ZWwsaS+6Vqek+q+F3UG3RbQdjQkYgGdklK583jvV0xHIPNxn+7Z7a7FpS1dZmu74npg9no6LkfvANOsOBdbFf283iN48EIB4l8ILF/HjvxObZZlHB+kNnQMBpkLfDvoJ8I8R64tsT/xHJx7sIcbt/JYexISFAYNzLUO3WF6JFHxo5h6MD9POc7PbOb65zdNPkRs+KrY/0I7Im4C1Phe8gXFdQ8HeHQeqcs0djCQ/tgAivGKzg3eQ83PxlHf2YVfR1VxEwgpHiYXFChRcMI1e2n65v0/PzieKiGgWrFx/K1m3A//Xv069dhZhTM51XkKkB7KNhcqdL2XXLIDWpfd2d3XdRKRdhULSlcZDtvxlsRGD7JTX0Nd2CboUyEXViOgkJFFSaEt8qGWW9WzWHoGglGkEaWGoLOtI9iVcdUJYKaZ8uiCnLQM6EP/S4Y/hhUys/G922asatLLJL1t8Y91Ao1AeD49PK85DRPJE2MDGVEnfZYbIJ1LQg3r5rtQDR49Dlgf3bV2mCzoWNeYAd+OmSH7aNDINOEVus556l+/aIeXHtUH1csW8wTddXVF7W7T6RfwRptntU09wRV0cmOewnzyXNOEa3x0uUFVIbSSCYNMTXuiaEWaGYD9xmJJ9KlrwptGoG9E3d6KVc0uFoIuhD35skR4KtDcdHaZyKzbwTVyiDuzmVx9soCCrNFtHf4iHUmEOvrR0h3H8LJqqn1T/srdxSlriZhjLqnojCTRfn6BZjT76HDuIdYTBDOQXt1tUaHlMCuq4XjdM0Wta7YSuuriERfMZijm+UF0vPjIopbDQ/Lp3mEBD5f4SIO4on5yS4yKxyd6giIQ6m+ZJGJoxOeB8v2EY5FEMsYYnZo0A8BfdB8hEOOlMdyePCjPmxbNUfsxryx4chGbUDzrOLz9AAvWUAiGUVrOizSA6FeJyaZEtXGsaPtQerWOmjHZpUF7QgaFki2WzMgHAvXob2RIEP1d74LWsk6qPLkIeO5WUOe0DlKMNyUi8UgsyPbwjZ80Y5GvwixSn+ERPwre/vjfsfNkjnXSjwWkiRHN2+uYGXVQmd7FNQS8T6+D650YSa3WSO2bcMmkt5jbCynN8sLG0xg1hCcHmMFz3FReyLu3Cg4mL5GR7qtH+VgM7MYt2jfoQd5Gl4qjY5+oK1XQTimQlMo7T26U8WTHnshOBKIHRBBqqy4Y3pVG9nbU7CvvoO26iW0xCogcAljEjnZQypgyeZKT3EgnkpASwRe9LxHymHjiR6lmgHwzSNQA4457VryuctBobc8728+2ATa8hMRF/M5XYg77fIM12N4Hd8qzQ3paN1RsrmARrx1fcGxXEallWsKjGDYmqsKvnMs6/OKvhkko/QXIBwv1a4y51xf4t3DLe148cx+ML6ZRIzvhghmFy8t4Or1ZRwabRHoV6rpeVCbwA4Gm0/g90GMch4CWqPU/SJkFAKVt7wCPiC2hCCsjI5arGsrOtkon+U2vjc+pbKn8l9gh+aAVawA0Ojpt+GpdFTeBRPzMBSub19C3vnTxEh/Or18dmrh/Geq4xNjHZieKWB+rih/3d1xicygsFIuVZBKtaItHAgKG1vPNX7p4gV8+Jv/DN+tBVFDG296iN+1Wg1tPfvxw3/2XyKZSj1ECV/cR7am0Jv0mWEotaqDkqvDMyOB09UG+qyyRB8o5XwsTbso5X3EMyoGDqmIpYJN03cpsW+1VW5S8VM+xU2RDnJEV/M94uU7cPILcFbn4OVX4RdW4BeWES3NIoV5xJL0gg+AVhpNpZRrO6pAwTKNq6lUYSiFQDol8dHrVJGqL6gCGkPO92H5HQ4ncerFo7we/r7TCEtocDNxbjS+TqQIFtMadxGr299pd6dGgrRwrZ3bVMJ7GHZHD//qJvhE1GqoRhS9/S2iSZhfKMM0QwKKlFtdRjrigL4nFETNVAte
eX0EA/uTEhLV8IzOZCI4fqwDV68v4TfvTAh6GdX0TNLC8WTzmEe9VK6JrweTkBBxkvjvROlinG8sFtgRyVjwmUa8OCM9mPfdcTRReW/aVWFM+N99eziZVr6LQBPRNKhP9GswdzimjdSyZFS+iAfXZ7nIvANALHLfFPRF7Ouz0CfOI6rgCaXc0hpGqWxjaqqA5RVLYuUlXE714XgWMu2+rNmN7eY7m7k3hQ9/8f/CtQrQQw07/8Y79/KbjKyFwROv4zvff+uBB0ljaKq6f3CNBsLX2jlGnniuaAaDpDZrVx7hS+BrtK7qbUqj9kO0Htvc8zCX1og76Y1kNbODeGTuC3yhD24QwaYlUkHTuNWFW5TzPpZnfBRWfUTiQN9BTYg6vRTFRvMsbjiKKsRcdn1VhVvzxbZrF5fg5xeAxVvQl28g6qzA8KpQvJqkNzUMhlGRqOvrDLIcM248JIzZig6ryoHyoIgoSdVRXerlGxNjNQ1dHJimAd3kbcqYbzjPJ6g2y1khOGoMquHCdkow17xCNzxQ/0mejKFzMqmaquXXqh3Y4uOmK45sZBwavFhjwu7mNZKwM1SvJUbK3lQJ2+D7wvh0D3Xg1dcHYegKCsUqmJyFUtnHH9zF4uQUEqaLQslHZ5cpyTA2gsLQrp1Jm3jxeAdmRKIo4eLlBUSiGiImgW1cZHMsV0U4HEDCknjn81XkspbYFLk5kdAzfWbICEBhqKPgJtY/2CYhQI3+bxxNGa+qgxpx50UjoGLibhYLyyWR4slIPM2Dc48OdZRkIxHtgUiCp9mWJ1mXaHXoC8E4jg1T60nW+2Uum/s3/xg1MjKcEbu74C1wvwv5MMMJwVvYbIy4XxANjw6qrqeLlnOz+/Z2jhtTCGYo9AAjvTJ3F1c+vY6ylsILp08ibi/j3CeXkPdCGD46hkP9Qbw608VevXIFc8tZ1MolOHoaowd6sTo/jarejlfPjKGyuoDlQg2DQ307RP34yC5M4fzFa3DNDI4dH0OnZHNzcO3aDbiRNMYGete6mF++h08vXcFyUcHQ6GGM9mVw59JFjK+6OH7qJHrTwK3bi2jv6UI6EWCHrD28iy9C3LkhLK9aSNyrwvMYi+hA0zWkW6JQVaqaSeQZLOEHtk7DhuYwpSDVm8G+XS0BK7Me8iueMAldqu2QcQAAIABJREFUQypSbUzxBxCddjcAdFykDabhqeSPof1cY/8AazUPO78KlLJwCwXUVlYQKk0i5swiihwiIUccyChxB8kGDAkP23LTZ7y46aNkA/NZDXqdYwyIYvNuJCRB1NH8tt1GRUjazRB62f6So+PASFLe0eS4DdOzZKE9yJytnxUbW8L2MTc8KVUqEkDiNhPy5u/rS9r8F5kBMjrN3A/7SIczKDp6epL1PAQewuGIFNKSMRB6YxC//KWDlZUC2nuj6O7JiFOdMIhNVbF/PMdsbAeGW9DbnUC+UBMELhI409DE8zeVMCWenXZp9plZ2qhOZDKNIgFqJMmGixoZu5oroDGM6jhyqB29PQmxFW7C6UpLGmPMftFcwMQy2cI9YSA8j5kTn/zB9cm+dXXEwJSfl68u4PChNrGVftFU1hxv2nv5SX+L9vvZO5/8QH9Vw5qdnRC2ZIp5EIJcC1HDtcMASaIW7i873bhDOUGtQTkbNs3C4l38wy9+h3gyhuzqLXzo2KjO3cFqVUFYtXHu8gT+1V/9C3THDfHUNSNRJMN5/OZXv8Wk34P5mWtYmFlAwQrDZb7L8iJq8T4MD23fJmt5Cj//yU+wGOrGaEcFb79n4xuvHoUzfRb/21//DANnvnefuHtlXDn3MS5NFIQB+Me//TEuD3RiduoeCpUSxmeW8PKxLswseUj33mcItm/B+qt1yV3BlXsZfDSpAdUSyjWGS5VxZjiPo4eiMGhLFuKkiDY5rFah27aorKsWuRUPucWAMLX1qsh0qgFRJ0TqLiPdyCRQiM0ukhP3kWwLXr4Iu+vb/Hh+MZGJB1QWF2BP3YCy8Bm00iL0ygpCKMPQXIQFbE2BL44aJOaB/EkUVc6nzUK8GsSv4mhYzTPsrwzfrnNAG6VXIXlMPqKi5qqo1AK1EQtvSMjsLIkR1do1mIjGzDVkOV7jfcUy41PJTBClLIQJjY4u1QCZZi+jJT4CShAKF2LCGhGw91LCA/dyA24Qv8ZF9qfm+NDMKJKpsPheNCdWYWKktrYovvXtEZw7Py/OO8ePtYuWodnRp1EePwOiTwCZQN3eXCfflfyug8DwfqrMdTqgSc5wfV0bifZWLCVx9pNZWFWmpQugXgnJed+BqLl2KVHKYNx9X29CcOWZOra7Ky7exXz2SR90OkynDRw90orLV5bFu/nggVYkk6Yw50+6/qdVPhkZMm2GoaFQqAqDRpOhANw8rUZ8yesJ1nWwHmQoBKK57jzzuY6Nj/nb5/HhhdsYO3EAHV1daAsV8J8n5/DNH/45TsSL+L//r5/izlJZiLsWimL08BhWJ4Ffmy34/g9+iNDke5iZ0WHqDq5duYze3n68cezAJrv3+o4uTk3h+s0FfPev/hVOJyfwP/2bXyLTmka/WkWYuSrsJmLoq+g/eAojL3Ui4S3gf/3w/8AdxYWmm0jEPeRmb+NS2MRLp06gLb4x5nx9vVv9EuLO0Cmtqx92YghWviC4pMulCt6+eg4zcxPo3xeWpA1hQ0WtauHGZBizSEJ3XFgFNkhBpkNBplNBKBzY/nYjqUuj6t7FTg2Yn/CwPEOFsY/eUaC1J8iU1rxRb9WRPZ2ncxS5/lufwrv2aySr04iqZQEGURNMYUbywxSdgQMZyyaB4GmeI8yrQO4qvmRuY/IQuaeetIUe6PcWbTjlPEJuGaYWBZHr6qxB/TNoMZ8k7lrVUVGq0v7DWHNKl0GZjbrLlo9MWwTDQymxTzds3uQ7iiUPrfmaRC8srVSguFXoDMETqhbUs6v/iYJXC2z3TBxDcvS4SRLHkKryXFnD8PEuAV4hUWo++G54D0FKTp4I7On3Zgro72sgNzXfvf57M/Gvv5b7/gFNt4oman21cpXP0LGPTEJraxSTU3nJgU0bfXt7TM7z2QeP4Bz9GAgHe2CkBaVPGRucxbGx7RmTB8t6+DO24wsxP3WqEx/9fgZXri7i9Knuh4pNfvhWPNknOfxksDOZsCSSsWoumKlPuNInW/VXpT/zI+AiOz+DvOOgo68XWn4Rl6/cRJ4hfaqORNhAW1RFmWA9a4eFy2d/j0W/Ey+/cBhGv46K/inKTghwyrCrebz3k/+I9oMn8dKJUZg6938H5XIZLhFBo1FBuMx0dGJ0ZB+ycxO4fucipq7fwCvf+xMcPfUGRn/3KazmDKlaGL39fWKi/eRXv4PfNYIf/PBN1GZu4u5iGUq1Chgurpx/Fzdud+CNN95AR2Jvfgp1yR2IEgN3Xwi2xbRyHhwngpXY6/h4YhRX7hQRM0tQnBqyS8BSrRuR3nZE4w7aujQYUQVmOCAEuybqJGqSChQo5nzM3KT3vI/9R1TUKsD8XcaF07s+CBHbdD9dezlbfOFOrVBqbqCPBBuwGjJQml+
E99k76K5ehhkx4UuMNon5gxSNZJb0x6pRuibkYaDKVTVNIBo1sfxRLaoiQujRoodqIY+owlSdgCs+5iTirL/xFxBvklCqclNRX5zVqCFpEO5Gr8h8hVTarxnyQdjCIDVr43o6qaA1HRHJf2WlLKoncSYLutu4bd2nMBV1QsYmsZ1Ek1suaoibPuLmfbQ8IZKPgSlnnUQTox9Cy75eHDvejWhYF3v1usZxlEjgbRf0yKV0Nj2dR0smgkQiSBCz8f7Nfj/MnBHCQSlEUTAynBZs7XMX5hGNGnjlJb0OybnNwIoWwZOsW0xqcvHiPObmSujtTTw4sTZr9GM4RzV8gTCitQCMhIzJWnTGYyj/WSmC8deyqr4CsXlWXskz0A4F4XgavX0d+Na3voZ7v/tH/Prvr6PYMixOtjXPw4rlYJThTPXDWpjGtUu3MHT6R8gYgNZ1AG/9oBe3rl7B+Ys3MDs9Lqa7q/NZmIkkXhrtRjm3gA8/+ABLVhKvvvEy9ncmEW/vxZnTx3Dxsyu4UVpErHs/WlJxsV3TDPmAE3mtjHMf/RrvjFv44V/8GY73ZYCDAxie/Azvf3wFs1M3kFcMqPdmsFjN4C/fevHhssKRKDs1H06VYTQM/XGR6TGRaO+HS7hTxUW17KMAD61pDT3DVB3TGzFQpzfADxoDttOn2OK9gIgvzXiIp1V07lcRiQeAIaTJlOQpIXf01yX4ZmZruwpIqaiath3Y5bIAzRiEJyQanoQ0KbDmJxEuzECNEEw1BHWL/ZqEjdL6Yk7FataCb1egejWoXhXMWb5QTcDwylDhwFN0IBQXFWhYpQ98g5wrcFUTtgCFNhoexHvypesGHU0C27SoFjfYqDXFlzA0toVSLj83MgA8qYjnpw9T50AFzEOjto2fvMo0sz5TqnpUTwchaZSsGfrG8D0xi9dzvJO54V4alMrBejA0rLlGlkPzCseOB9tcLHuwlBgGj+7D8Rd60NpibkrYG20loaVX/L59CXGWy+YsCcmh2v5JH5TOI5GQ2PCLhZpg2DM/eyOUbqf6JXwoFRaiPj2TR3t7RMBAmjULO5XxMNfJ1NEEcO36MnI5Cydf7AwYzC9QXhXOJR61miPzShjZx65jepjR/+qZz38ENPSOnsC+Kx/iFz/9Gaz8Ks68+cco5ebx8T/9Ay4pVeRTnRhI+ZianEK8tROlpRxuLbj43sGeNYe50uoyrl+bxcixw7By7+LOnSVEjW6opC2SFS6JkUNj6HEMZBIBOp5rFXH71k3cy/nojsZx6rUjGOwhQH0OFasCWxymfGTn7iHvq7j0j3+D//3fv4OT3/8RctPjmIub6EoruH37LrR0L0b8PP7pwk1AjyChkprs7ViT3Dc+JlKPy9zZ/KNjnCoCsBkBEpkg3EE22T1utFyYtK+Xi8D8uAur6KO9V0V7H8V4SmsBIaDEzkU7PxHE7JLA8/pONnhF02BXLNT+f/bePMiu6y4X/fZw9pmnnudWqzVLluXZcYhjO3Eck+mGm3ADBPKAG3h/UAVUAbcooKB4r4p6BRQUBeS9oih4PAJUuBBTNwlJnDjGJo7jxNZszVJL3VLPfeZ52K++3zq7+/TR6bkltaReqtbZw9prrb323uv7zb+xk7BSYwjqORQqOtLBnXANHITL50NydBSp09+Dq1jArB6GqZXE7UrE7nUTwcnk31xOx+xcFkZhDpZGvYkGTa+iqpVhV3KwqilGXpfjxXJeuD4DKmsXVx+K8dPpHFJFxlxnBzTkk4SvsA03IgGP9KO4RmcAC4+SUE2OXjDfOd3wy3mhSFbcPxrOLbUr91fzjefaSPDmHNAPnxIGAjyN+JhohilmfW4nlj2NClUkPGehZR+8yqGHeZyZ7HiUbRZoqGaE8OCju3D4cKe8UxS9r1QIhkwKQn92ishboh74/avn3ldqf7nzfB78lmk0RAMucoqrBXdeyzmg4d70TFaSuvT23HrjOs6Xy6VheDgi3Hq+wDleeJeWu9+75ZwztzToYmITEsQGczuslcO4W274nhonGQPnb6M31rydaP8BfPRDRRw9exnRHQfwnsceRmXuMv7zu+8gXjXxyQ89id6QhYszebhLFYQ6+/Dij30CB3vr/OQNC0MHD2LPvkF0BSz4o2fQufcwDtaywrm8AewY3rvoBoxACw4/+giKJ87DCnXj4YceREeAhktePPPBF1H10UK/ilKxgHxVh7djEC/86EfQPxhFMhZDkethGYj2DKKnYwgdxgCq3jBSZguefM8+iZ2yqMMVdgTcOUUEXMfyvZ4IVmCqrKJKeRvlIvNdqxzb9fVW6EdOk1snNzhz3cb0WBV0dRw8qFzlaFHPPxZ+vOTYW3vod2zgxuWKgH7XkD4vKVA1F/+vmS6UixUUzvwAHfHvoTtUhN9tolAt4dLoVcRiE7A9XnjmRhByT6GtqwOGy8T1MU5sCV5rcThdLuyM0jY1XYBeiMPSS4vDxxKkBSE5DgW9buRlUAwzKxCpmZIjvTA3gUo2XgsFR+JAUSq2NyIBbsQCf/HtLNrjnAhRsMQ6zdOFElUZFWWIKp0vamLJHeqJ+SecNoAbCQPBIhDy2IhnmRBHQ8Cj8rHLMEBDGuo96y0D1DfLdqQQGCkF8DA9ahXpkgv7Hh7EkSNd0HVbFuUlB1R3Qt6Fqi2cM8Nfnjozg8MHOwRwVwu0dc0tuamIFD5/dQMUy5P4oHsdLbJNc+Hcko00nKCRH7n/oN/CzExWrO6VNKOh4ibv8tl0dQYkTG88nkdXF1PL3HuFc5vLlyWr2c6hsPj3N3pT3Ht3fffeUblURi6XRyWfh1kLJrXRu8nn6M5aaDCm1DGw72H5m2+/dxc++uO75ne5sWfv7tq+F89+oHPRuUBLJw61qGM9ux7Af931wKLzTXc0A/3DD8jfovOaF48++d75Q+0Dw2hn/zuG8Nz80YWNXXv31Xa68cGPfHLhxBq3FOduA4W0jXxGcWtEJVm8nYW61miF2dvI4S2bDaVhBOTUa+xcLm1jarSK9CzQ1qehtVdZ1ZNbbyxcY6kqiHRxUTVw7YwKVdu7mz7pipMV+BSvCh22biIXzyNz5h30p49iT68GyxuS+3AHNOzzl5HJnhLluadTg9UfkpR8BKNkyof0XB6WWRZpAfsm15otGpiaKwM5AjvTCjp8aeNoF/Yb6zBmnHDUJYrp6/zbSb1UK7DzKRH1Q+LKL7RTv0Xg4fMgMNC6eyn9Kd26hDJaggCob1O2+dzLTE8KUDBDFUu5wpzwusSdj6VpJW6jO1IRUT0JM6fIq9HwfnDXEcOzHgX3hsGUpIDu9qF/ICL2AvTDXktR3Lsprl3HTkzizLkZHDjQJj6zmwHwyj6Bxoy2GMY4Ee6ujMQFlHcNq8h3awcOxUWT82c+do6VbTsExFrmYG11VcjjYEDFAC9IfHC+Nw0PbG2NbqnalFDx/vbva8XISAJHj01g755WkfBsxjuxpW72HhgMPYta2jqw9/HnYJeZ8nVJofGa7rZYKKBzcBdcTJO4XRbNgJphDUjFqrierMBlVRBqZT5bBmhRYnCCCy
Ve+TTg8WkwOI9cJxpApGFXcZDMuZu2MTtRRXLGFsJg8AENgZDSrRPjliw1F6Zot+J0xy5UxJK/a6clHDPd8Ir5CiqFLKrZJMrXTqO3eBLD/SWYbnedT7hynaExmpSa3l3EebqGvm4fziSyKJRpOKjuq1AxMBmzUUgl4EFuySGufEKDXS5JyEWJWKZYRLlMOF/TkqAQzKW+4tJLPfYyHdaYzmXrNF5Ofb7HZIQkRpPTEfDY6AgVJVvcZJJhhjW4TaWPr+9btusPsGEZ38JdyBaJtCoQ6QjB51exqBvHsJp9LtgUxx9+oBOnT0/jzJkZHDrQLsaIawfdhR4J7DRAu3wlJhw63cYYs75QrGDkahz9fSH09QZrrnYL161mSz0PTYLvMB43xfM93YHbALIkIACK5Bnn3iVJQFYz4runjiw/mkp1Gw65cfFiDOfOzUHfr0lI1I28E3fPLNw9I+Wy99gTT+LIw49s6qD5nvMbXk3K103t+C5oTMCdEx/uNJC3dMyMAeOXKtBNDcGohkiHDssHFLM2chkbLd3Uhdd03w0W1M5iLuhiA9mEilaXS9FoTBPDOLZH4oCg7oBR03kSgsMWS/pQqwZfiw5vSMP1Eylkx6bgt9Iws1PC+SKXgZ2dQ2cki330y3db89mNnLYV01JDo/mBcgw2Al4T/oAH+WRWiJZMyUQqXUY+lYDbztQQtRHJnJbX/8u+GfKQAVhcuhJ1r7+1tV/JaWAc95CvWnumFA8ocTqJDVrw5wqKCCNZJNO2XDdUp9TOc7ZEf89Y5yUTQ50RBATcnRrLNdT8HMX7BN5DB9tx4tQUzp6fw4F9bcv6vzdvSR3le8+UoecvzCGdKSIUdCGZzGN2NivvZnubX8LZ8jmtl9uWMYfc8Ppc4rZFcL8the6thgaG2y1VKkJA3g5f+9tyb7VOuH6Q6KM3wL79bTh+YhKXLsVw5EEaESoC53aOZ7uv5WeA3PU2h738HG3m2XmxPFNbtvbp8IcMpGNAOm4jk7IRn67AdCkwj01XweRmKt46ufnFHxA/Nrtsg4Z2xbwtCyfd5Nr7DITbNZg0WKc0egUjPOr+8zkb9jTwgM+F9FwVc9eBJzosdPRWcPqHZzB57rtoD2ThcVWUCN1ywePpELCkCHu1hWOm6DgatnAtFUC6UEUqnoaWnYUFGsexpebAroSfKlnLavtbqEeo1GFYKsMZPQ/WMOyFZjZhS56bQ6hRVF+hKoG+oLxvFVSosRvOizMr/C2UNcxllH5eiD9bQ8BdlnfEE27B4FBU0qfWx3FobHM1+1zMw2ELe/e04OSpaVy6EsPu4ahEZ2v0l1+pPVq+pjKKoz54oE104tSv53Il6IYu0d3Yxka4QM4tOWeGvKXVPVUnkof8FonIHVBjv0M7IpKH+8yZWfD+6HK5kXtZaT7v1Hm+EzR2HOgLSY6B6ZkcersDqA+MdKfGtt3v9gzcqRmYV3xwrWE0OS7aoTZN/ij2zqWAQpaAbMPlMSTIDHWprEORnySKcUYv8elVjHqPX4PXD/gjtRC01dVHqyPs5TNAj2ngI496JZpZOmuLC57b48Zw51N49RtjSE+fkqAiDBqSq2pIpisolGyE/E5MbQd+FIfOdqmrc9ZVcpZcaFk62twIhz2Yni3gXGwaup0Wt4eloZutUW9Al7aKhKJ1pmHhV3qs7XK7rpAb1E14/T54PQZssbavO38bNhdmR3XG+aDB5lyGVuHKIj7sq4o+3pkzZ1gUtZO4Y6Hffa6kgzngmViGaWJLZQ2xFGC7Anj24QEJVsNFeDMKxei0nt+3txXnzs+K6mDXzsi6OHgSgj6vIRHPyMVLzIGwcm2hjpogudHCJsIhC/FYHsl0Aa0tPgnzvNF2G6/nt0tJAQGecbxDQTf27lJE0MlTMzh0sFWO30u6d2cO+G4xiVA45MHEeBptLV7xa74X79W55+3f7RlYbgbmwd2pxMXM4ayZGMXdrsTnYj1cVOfoDtc5RNm8c1XtV3yba7rXGmcnRMMKnHp9KzTqKhRslOI2Dg26RMdPR7FQXXCyrt4deN+HPovXvvU/kZg4LmJlhvfLFgoYHc+hPWqJcZTELyd/rOsSYIb2cH5ZyJWVN92zsmkCswoOEwmZEpecHHUFjAbUCH88ovzTCfoK+Pmr14ztGieEenSeq6kgxCquVociDMuLQDggKVZZieDqFKlV46brDjunN+XXgVq27/zR5Y6Sl4i/DK/LFo6cgE8Ad4COIEIrerq6UerBmOZMB2sZtuSCt+gVQQv5nI7+PV3Yu6ddQvU612908GyH7VPETSCj/p3PevfullUDvEgeNGBmhoZuypWKhooUv2+2RxUBhoB+7VoKU1NZCcaz0Tmov17eD97LbA6Tkxmx7Pf7LNAvv6cnKLYK756ZxrXRFHbuiMizbnxT69u7G7f53EiYMYrhydNTEpOCoX+Lyxr13I13uj3m7RlY3QzcBO71l4l4mwjghIgtKU7dGyR3rkTsNwE8GyCBIIhW39oqtzWgVLThLmroa1/as7u3rwfv/9Cn8dq3TMSv/xAhXxk+o4Dp8TkkZxk6lj7fzBlFaYQhfrDctrxudHf6EfCZuDySwOSNmLhmedwWXJaOVJaA5oJuBNFo6ye3xAxybNOuoqyp5DEONIqlvKy06l4J+xLYxq3BivYAlUINRunuWIHuCcLw+JHKsb3F98oAM+SA2RydFMqUODhovMRUyvhWgaAENjLRsTSJHmUl7riwcd50TUcqx/us4kbMJcRTW0DFG3Bujxb1lqsqBnjk4Bn9k5b1wj0ykU3ORrSjBY880iN+1/RH3szC2yS3JglgKlWcuzAnsRiGd6oEMyvHGVdujNl8GUyTytSQarY5i5tbKC2ixXx7u0/07sw25/OamyYiJ+HN+2WIXlrGMyzr1HQGo2NJ7KkRPPk8k+HksXOIb+vtsNjf3DlcTWskohjBkDYsE1MZyU9A4m+be1/N7G1GHa4OK3sUbUZP222sPAPLgvv85bX1rlyyBXgl3SK9rlYAm/nr17DBRbtaBPoCBgKe5V+Unp5uvP/5H8Nr3zYQH30LIS/92qvzi6bu4oJdE8nXArPkU1lcTuWQqbiRmJ6DkZuAqVWQsg3hxcu6DzQOcNl52a8fOtsqwYuKZsFtp4WKqcKQuopDd6Bv4SpOkdflQrjVCxueeQ6/WtWheUII+ykBqUg8+XpcTuVN2IYL0ZAp+ZFNHxAMuCXr20LraouAymvjSYpWSqIuqW+rsT730wUDJY36fiBNcGMiFcsQNUswaiCeLGMykYflsiWAzXSKue1VS5wHusyFvBCpAwkCKbUf6nXztgu7d7SLjzWTqdyKwm7JwXd3B8Uin4ZxlCIMDUVgN01XvDAK2gXQJY/XCzUqIWcXzm/mFgkHWq7TAp854pV6gu9KbcI22BnlQ3x+AZ+FdCojLp4MfUuAO/nuNJKJgti/7N7TIsTXyoTPBgd0hy4niNOIcNfOKI4dn8TY9RSGdoQlpoLzit6hod0n3fJNLMO21yCqbTIzVI8RY9b6hcjqO/8fN6qS4ZG2OHQhp
mRnI8VZZ/n9mubNaWY30vatuHYB3ClGN6CyuTEUeyOjVXM5UH66t2IoKohOybZRSABDXSY8bnlSy3bW3d2FZ1/4FF572cDsyBsI+5l6cHGWLyV6IAevwWNVEc/mEI+VhAhwmyZcKMNEWThVaFVJ82fWgtHUd04/dXKzec2EqZEL5+JsSBY54dpZedF6zZ0qKloVHjsLW9LI8QWroqhZsDzM+V6GqVclUIzTF+86XwLm8oaI7bs73MJtMWcyX/xFXYhkQsNcooxUPAOPyZCciw0dnXb5y7bpd16w3TiwN4pwwBTOledoRMbzlkvD9FwRFy9WEHVn4bY0Ec/XL5A0ALQYb6ABt6laoF+/JxTC0HCriHBuJefkADzd1aiLv3hpTiQw/X1hCUJTP2ZnHjg//Dt/fgbJRB4H97cLZ62A3qm19l9+/I2FkdNS6aK47+XzJQztiArXPk8QNV6wjn3eI2MgDA6E4PWZGLmaQLFUFiO6UMhCsVCB30eDU+bSdiwn19HRXXAJ30fq3vv7Q7h4cU7Anln6KJ3ZzDm/C6bi9g5R01EpzyGXfhPLx9Jcelg0pCbBfebcLPp6giLpEkK4fsGrfWOiwhQ1cC2QWO215vpIQpppnxOMe5IpimFlX19IJLbSVH17Sw/npjN0zWcI7PEJP/bueww+n0pRfVPFLXJAgTvd1tI2UnEbxaQtoENjuPqFm/PBxUssodcx+EULX+0BLZpjjX65NuamqvCmdfS3m6uW8HS0t+LpD34Cr3y9gOSNdySaGiPbLWqfuCt6bb4ROlx2Du5qHCpM7AJFR9EsRe4cohLTLtysAnARaqq0VHJT9Tr3hbpqiyOgzr2mqRek4VvIgZjiwiP+7TIurtAL1zPUa7laRLVchMfthWUyKmB13hjQqckhcL2emmE2uDyYEl26cSo0/mo2siUToYhPjAg9Lk0t+KwnH4jSoZOgyGYDiE0W4bHKkkymsV3uN84x9dbFqoGurha0ieFYY43GAW18n+MgAbFjMCSAfv5CDC7TlDzulbKyqXB6IaiTgr96LYGpmawEQeno8As3vdLiz7kWwoBTxe1ao4pYUGPgM3LmiRbcHNfly3EUChU8/FAXAgG3HNtsgod9uiwDgwPkVKtyf52dflkk2ZczR5vdrzOvW+WXz5DPYMdgWDwTLlyclX3GK+Bzutfv/849BzIUBVRK2abSxdWMS68w2xrtoCZx5bKGoL9DDEAZKZKrCFdeZiBlyVEKVqyKZ0ueqrVyVZI88d3nPiMXMiVwe5tPXHD9/jhKxeRN69VqxsU6VO9UymWMjaZheR+BZSmj29VefyfqCbgTsNuqGtKjwNGjJQTCwOH3maDhHKPErVTmgbu24DnLufPLGaVOlzwnCQZKBRiVjmJ+Md5jH2XAKmjY7Xbh8EEXuloX66BXGkNHRzueeeHTePWHR35WAAAgAElEQVSbOhJjP0DIVxFd+/wY6hqo2jq0akkAXiiW+WVaVVKvj3qdFi7jvo0Kk8xUyeUzdvzaUvCptujypkNzeeD3sCdlFyDn6geraTD0Khg+NF8IiKhV7BgWBiRbbIGcciFXFPWCymvdUKm2K71x/jVT9LKMJU8xcWPhAkgJTV+PH6lUEdlCAkEvCQs1M431nX2+B5QKULWxY6hVPi6C6+0oHDM/wF3DUaHUz12YEcOy1tbFyV6Yq316OoNLl2MCfD3d5PiVPcFy4+R80NCObnJcSCjaZtY1Xktxey5fkeOiuedEEE11XdLFcqHZt68FkYhXgHe5fjZyjnPAv0jYjauAvDutLbe2z42M91Zd67wLjFhHW4eLl2My7zuH1udRcavGee+1SwBg4Kv1FSFQXRr27unAiZNTOPluHPv2tEhGRjJIlbItNitT01nJ0lYtK7dSEtEOYe5ymeho96OlxSOSqgXCm59kI7u2+nHSM+z69QySqRYc2b1nwyL+1fe8/poC7lzeHxiw8EhQR/K6ibfOF2H5yjj4hKnE9BXFtSvORYUppQifD5EPhGpLAW0xYqtxjjVVJoG8UrRRyWsoZmxJzBb2aoh6dPh0XYLhB30agi4dbSEDg0wha63v9ejq7MDTH/gkvv3vJcQnjyHibwLwmjL8og6CIKts2VczgRqK8KGo+eGymQWuEfxX04YzZwYMlxt+j3rZ6jG9vhUa1KWyRczFC/B7fUKHcL7rC2PUU9xbKZbgYha2ZT4tPr9sXoPH50Ek2Ki6qG9VcaF+j45gyIv0TAqapqQZUqtuDHWb8vHkyzpa+1rQ3aVSnNafX9zD5u9xUScI79vTilOlCk69O42HjnQiHGS0wqoQG7OzObz77ozkiqduXriCFQZJooEGaRcvxcQK220xx4EuyW/oicFFw3Kb4hfvMg31nEivlqsigh+KuCXhzWqIiI3Pioo2yMAuVAk0vi8bb//uaIHvAm27BgbCom66cEFx8Lt3tawrJsLdcdd3/yj53MJhNx442IGzF+bwwx+OY3hXFKGgB8yuSG8QBrIi0crYEVQ3MakTQZxMKr/VeQlNTaK30VmhpG96JoXr4x4M73oPolFmetv6RcCdC4Bu2OjtAH7ieR/ShQpGR6qY7qqicxfTpzI0qS25zLM5G0aS9WkYxAgfgG5r0GqJX5hV1aNrCJgaLGiwNA2RgC553/2W0ud63Rq8PG8y1SjFiZpKWrMJ89XT04VnP/wpvPp1G8mpowj7SIyQslONk2zgSyAcFg+ugo7gpUUtgLJGQ7scLFslh2EbVRIJtiFx45cDVufWuOZUdUvymFMkv1Th0FyGhoCriBs30oiG3QgTkClirV1EMRVf5kSqhFKpBL9HidadNuXWHPGxRJ6rolgx0RLwIOCjb5v6GJz6878k0mo7zE8fs02kcyruPilYEnN89h4XY7GrijJHTBls+DC8WyV22agOe348a9igMR9Bbd+eNpw4PS0cwOFDHRKSlGJ4Br4J+F2QRV64cedOm3eidOYFnH53RuZ6966o+MSztukyQEkAnwEzlCkwXdBV0JCOhaLGshjuNe9jM4/yvfH73fD7TJSKJG43s/W7qy0CBeeD9hhuty5EHQmxPbta5ZkJAXB33dI9P1o+Lz6XaIsHjxzpxNXRBC5cjMm6x/DTe3e3iBcEgZ3FecYO5672l/+m1zKJlIQW6GJ9vYL2rkcxuGNgLZff0bq1pbnmugZgZ7eBn/uwH5MzVZybLuHS6TL8AQ0+UkZ5oEvXES3r8CUUVxD0aiDnHfbq8LtpsMY/KODWdQXeBPH1SLDXOTV9vT14+vkfw+svV4SDD0vETyXm58uj0upy4eNLwNWPsMw/ZTDHLW4756pwiSjbAoE9M1/btIsoaV6UNN+89XzzITt9CJ7CNi143crVrJmo3WmDoyOAMvPR5asZtLWomPrkGvkSe9wGPG4dyVQBbqMEi5EE5eIaqDDQjE2dOv/o3aCLPpxiLGahK1Ac7XTGXwF1ZaBiuZR1KTn8iwUdcwWPiLqyySxM04Stu5DNZsFMeXzebpeNTNFA984uDA5G1awuary+o1u7TaLCcusSe/74yUlcoYFZ
# [inline base64-encoded image removed: presumably the price-colored lat/long scatter plot discussed below]
# I'm not getting quite a color gradient as I would like, because of the really expensive outlier house prices.
# So now let's clean up by dropping these outliers
df.sort_values("price", ascending=False).head(20)
# The top 1% of all houses is about len(df) * 0.01 = 216 rows
len(df) * (0.01)
bottom_99_perc = df.sort_values("price", ascending=False).iloc[216:]
plt.figure(figsize=(12, 8))
sns.scatterplot(
    x="long",
    y="lat",
    data=bottom_99_perc,
    hue="price",
    palette="RdYlGn",
    edgecolor=None,
    alpha=0.2,
)
# **Other features**
# Whether or not the house is on the waterfront
sns.boxplot(x="waterfront", y="price", data=df)
# **Working with feature data**
df = df.drop("id", axis=1)
# Convert to datetime object
df["date"] = pd.to_datetime(df["date"])
# New column Year
df["year"] = df["date"].apply(lambda date: date.year)
# New column Month
df["month"] = df["date"].apply(lambda date: date.month)
# See whether house prices fluctuate seasonally
# Monthly
plt.figure(figsize=(10, 6))
sns.boxplot(x="month", y="price", data=df)
# Mean price varying throughout the months
df.groupby("month").mean()["price"].plot()
# Mean price varying throughout the years
df.groupby("year").mean()["price"].plot()
# Dropping the date
df = df.drop("date", axis=1)
df.columns
# Zipcode is numerical but not a continuous feature!!
# Zipcode mapping is hard and there is no clear continuous distribution of these actual zipcodes, so they would need to be treated as a categorical feature.
df["zipcode"].value_counts()
# There are 70 different zipcodes in the data. It is not feasible to get_dummies 70 different categories. So, I will drop the zipcodes.
df = df.drop("zipcode", axis=1)
# yr_renovated could make sense as-is because of its scale: a higher (more recent) year should correlate with more value
df["yr_renovated"].value_counts()
# The higher the value of yr_renovated, the more likely the house is to have a higher sale price.
# Since 0 (never renovated) sits at the bottom of that scale, it behaves like the lowest possible year,
# so we should expect it to add little value.
df["sqft_basement"].value_counts()
# Same thing goes for 'sqft_basement', where 0 means there is no basement
# **Scaling and Train Test Split**
X = df.drop("price", axis=1)
y = df["price"]
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=101
)
# **Scaling**
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train.shape
X_test.shape
# **Creating a model**
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import Adam

model = Sequential()
model.add(Dense(19, activation="relu"))
model.add(Dense(19, activation="relu"))
model.add(Dense(19, activation="relu"))
model.add(Dense(19, activation="relu"))
model.add(Dense(1))
model.compile(optimizer="adam", loss="mse")
# model.fit(x=X_train, y=y_train, )
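# The fit call above was left commented out and incomplete. Below is a minimal sketch of how
# training and evaluation could proceed; the batch size, epoch count and the use of the test
# split as validation data are assumptions, not values taken from the code above.
model.fit(
    x=X_train,
    y=y_train,
    validation_data=(X_test, y_test),
    batch_size=128,
    epochs=100,
    verbose=1,
)
from sklearn.metrics import mean_absolute_error, mean_squared_error

predictions = model.predict(X_test)
print("MAE :", mean_absolute_error(y_test, predictions))
print("RMSE:", mean_squared_error(y_test, predictions) ** 0.5)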
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd

# visualization
import seaborn as sns
import matplotlib.pyplot as plt

# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.metrics import (
    f1_score,
    recall_score,
    precision_score,
    accuracy_score,
    confusion_matrix,
    log_loss,
)
from sklearn.model_selection import KFold
import itertools
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
train.head(3)
train_x = train.drop(["Survived"], axis=1)
train_x2 = train.drop(["Survived"], axis=1)
train_y = train["Survived"]

# # Understanding the data (EDA)
# Understanding the data is important when building models and features.
# Rather than assuming a model or hypothesis up front, we do exploratory data analysis (EDA),
# looking at the data from many different angles.
train_x = train_x.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)
train_x2 = train_x2.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)
test_x = test.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)
test_x2 = test.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)

# For xgboost
from sklearn.preprocessing import LabelEncoder

# Label-encode the categorical variables
for columns in ["Sex", "Embarked"]:
    le = LabelEncoder()
    le.fit(train_x[columns].fillna("NA"))
    # Transform
    train_x[columns] = le.transform(train_x[columns].fillna("NA"))
    test_x[columns] = le.transform(test_x[columns].fillna("NA"))
print(train_x.shape)
train_x.head()

# For logistic regression
# one-hot encoding
from sklearn.preprocessing import OneHotEncoder

cat_cols = ["Sex", "Embarked", "Pclass"]
ohe = OneHotEncoder(categories="auto", sparse=False)
ohe.fit(train_x2[cat_cols].fillna("NA"))

# Build the column names for the one-hot-encoded dummy variables
ohe_columns = []
for i, c in enumerate(cat_cols):
    ohe_columns += [f"{c}_{v}" for v in ohe.categories_[i]]

# Apply the one-hot encoding
ohe_train_x2 = pd.DataFrame(
    ohe.transform(train_x2[cat_cols].fillna("NA")), columns=ohe_columns
)
ohe_test_x2 = pd.DataFrame(
    ohe.transform(test_x2[cat_cols].fillna("NA")), columns=ohe_columns
)

# Drop the columns that have been one-hot encoded
train_x2 = train_x2.drop(cat_cols, axis=1)
test_x2 = test_x2.drop(cat_cols, axis=1)

# Concatenate the one-hot-encoded columns
train_x2 = pd.concat([train_x2, ohe_train_x2], axis=1)
test_x2 = pd.concat([test_x2, ohe_test_x2], axis=1)

# Fill missing values of the numeric variables with the training-data mean
num_cols = ["Age", "SibSp", "Parch", "Fare"]
for col in num_cols:
    train_x2[col].fillna(train_x2[col].mean(), inplace=True)
    test_x2[col].fillna(test_x2[col].mean(), inplace=True)

# Log-transform the Fare variable
train_x2["Fare"] = np.log1p(train_x2["Fare"])
test_x2["Fare"] = np.log1p(test_x2["Fare"])

model_xgb = XGBClassifier(eta=1, n_estimators=100, random_state=0, max_depth=7)
model_xgb.fit(train_x, train_y)
pred_xgb = model_xgb.predict_proba(test_x)[:, 1]

model_lr = LogisticRegression(solver="lbfgs", max_iter=300)
model_lr.fit(train_x2, train_y)
pred_lr = model_lr.predict_proba(test_x2)[:, 1]

# Blend the predicted probabilities (not the hard 0/1 labels) and threshold at 0.5,
# so that the submitted Survived column is a valid 0/1 value
pred = pred_xgb * 0.8 + pred_lr * 0.2
pred = (pred > 0.5).astype(int)

submission = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": pred})
submission.to_csv("submission.csv", index=False)
pred.shape
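# KFold and several metrics are imported at the top of this script but never used. Below is a
# minimal cross-validation sketch for the xgboost model; the 4-fold split, the random seed and
# the choice of log loss / accuracy as metrics are assumptions, not part of the code above.
scores_accuracy = []
scores_logloss = []
kf = KFold(n_splits=4, shuffle=True, random_state=71)
for tr_idx, va_idx in kf.split(train_x):
    tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]
    tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]
    fold_model = XGBClassifier(eta=1, n_estimators=100, random_state=0, max_depth=7)
    fold_model.fit(tr_x, tr_y)
    va_pred = fold_model.predict_proba(va_x)[:, 1]
    scores_logloss.append(log_loss(va_y, va_pred))
    scores_accuracy.append(accuracy_score(va_y, va_pred > 0.5))
print(
    f"logloss: {np.mean(scores_logloss):.4f}, accuracy: {np.mean(scores_accuracy):.4f}"
)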
import pandas as pd from json import load import urllib.request, json from pandas.io.json import json_normalize import seaborn as sns import pylab as plt cur_url = "https://zenodo.org/api/records/?sort=mostrecent&type=dataset&access_right=open&size=1000" counter = 0 while True: print(cur_url) with urllib.request.urlopen(cur_url) as url: data = json.loads(url.read().decode()) with open("outputs/%02d.json" % (counter), "w") as outfile: json.dump(data, outfile, sort_keys=True, indent=4, ensure_ascii=False) counter += 1 if "next" in data["links"]: next_url = data["links"]["next"] next_page = int(next_url.split("page=")[1].split("&")[0]) if next_page == 10: last_date = data["hits"]["hits"][-1]["created"].split("+")[0] next_url = ( "https://zenodo.org/api/records/?sort=mostrecent&q=created%3A%5B%2A+TO+" + last_date + "%5D&page=1&type=dataset&access_right=open&size=1000" ) cur_url = next_url else: break
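# pandas and json_normalize are imported above but never used in the harvesting loop itself.
# Below is a minimal sketch of how the dumped pages could be flattened into one DataFrame;
# the selected columns ("id", "created", "metadata.title") are an assumption about the Zenodo
# record layout, not something the code above guarantees.
import glob

records = []
for path in sorted(glob.glob("outputs/*.json")):
    with open(path) as f:
        page = load(f)
    records.extend(page["hits"]["hits"])

records_df = json_normalize(records)
print(records_df[["id", "created", "metadata.title"]].head())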
import numpy as np import pandas as pd import tensorflow as tf import efficientnet.tfkeras as efn import glob, os import pickle keras = tf.keras layers = keras.layers TRAIN_PATH = glob.glob(r"/kaggle/input/cifar10-python/cifar-10-batches-py/data*") TEST_PATH = [r"/kaggle/input/cifar10-python/cifar-10-batches-py/test_batch"] BATCH_META = r"/kaggle/input/cifar10-python/cifar-10-batches-py/batches.meta" BATCH_SIZE = 100 TRAIN_SIZE = 50000 TEST_SIZE = 10000 EPOCH = 40 AUTOTUNE = tf.data.experimental.AUTOTUNE def load_data(path_lib, dataset_size): image = [] label = [] for path in path_lib: with open(path, "rb") as file: dataset = pickle.load(file, encoding="latin1") x = dataset["data"] y = dataset["labels"] image.append(x) label.append(y) image = np.concatenate(image, axis=0) label = np.concatenate(label, axis=0) image = np.reshape(image, [dataset_size, 3, 32, 32]) image = np.moveaxis(image, 1, 2) image = np.moveaxis(image, 2, 3) label = np.array(label) return image, label def load_meta(path): with open(path, "rb") as file: dictionary = pickle.load(file, encoding="latin1") label_to_name = dict( (index, name) for index, name in enumerate(dictionary["label_names"]) ) return label_to_name dictionary = load_meta(BATCH_META) test_image, test_label = load_data(TEST_PATH, TEST_SIZE) train_image, train_label = load_data(TRAIN_PATH, TRAIN_SIZE) train_dataset = tf.data.Dataset.from_tensor_slices((train_image, train_label)) test_dataset = tf.data.Dataset.from_tensor_slices((test_image, test_label)) train_dataset = ( train_dataset.shuffle(TRAIN_SIZE).repeat().batch(BATCH_SIZE).prefetch(AUTOTUNE) ) test_dataset = test_dataset.batch(BATCH_SIZE).prefetch(AUTOTUNE) optimizer = keras.optimizers.Adam(1e-3) loss = keras.losses.SparseCategoricalCrossentropy() metrics = keras.metrics.SparseCategoricalAccuracy() learning_rate_callback = keras.callbacks.ReduceLROnPlateau( monitor="val_loss", factor=0.5, patience=5, min_lr=1e-5 ) base_network = keras.applications.DenseNet201( weights="imagenet", include_top=False, input_shape=[128, 128, 3] ) # base_network = efn.EfficientNetB7(include_top=False,input_shape=(128,128,3),weights='imagenet') network = keras.Sequential( [ layers.UpSampling2D(size=[2, 2], input_shape=[32, 32, 3]), layers.UpSampling2D(size=[2, 2]), base_network, layers.GlobalAveragePooling2D(), layers.Dense(2048), layers.BatchNormalization(), layers.ReLU(), layers.Dense(512), layers.BatchNormalization(), layers.ReLU(), layers.Dense(10, activation="softmax"), ] ) network.summary() network.compile(optimizer=optimizer, loss=loss, metrics=[metrics]) network.fit( train_dataset, epochs=EPOCH, steps_per_epoch=TRAIN_SIZE // BATCH_SIZE, validation_data=test_dataset, validation_steps=TEST_SIZE // BATCH_SIZE, callbacks=[learning_rate_callback], ) network.save(r"./Network.out")
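# A short sketch of how the trained network could be evaluated and its predictions mapped back
# to the CIFAR-10 class names using the `dictionary` built from batches.meta. The five-image
# sample size is arbitrary.
loss_value, accuracy = network.evaluate(test_dataset, steps=TEST_SIZE // BATCH_SIZE)
print(f"test accuracy: {accuracy:.4f}")

probabilities = network.predict(test_image[:5])
predicted_labels = np.argmax(probabilities, axis=-1)
for pred_label, true_label in zip(predicted_labels, test_label[:5]):
    print(f"predicted: {dictionary[int(pred_label)]}, actual: {dictionary[int(true_label)]}")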
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# Any results you write to the current directory are saved as output.
# Load all the train, test and submission data
tweet_train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
tweet_test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
target = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
tweet_train.head()
# Missing values in the train data
tweet_train.isnull().sum()
# The output shows location has many null values and keyword has only a few null values.
# Let's see how many real and fake tweets are present in the train dataset.
tweet_train["target"].value_counts()
# EDA for real and fake tweets in the dataset
real = len(tweet_train[tweet_train["target"] == 1])
fake = len(tweet_train[tweet_train["target"] == 0])
df_count_pie = pd.DataFrame({"Class": ["Real", "Not Real"], "Counts": [real, fake]})
df_count_pie.Counts.groupby(df_count_pie.Class).sum().plot(
    kind="pie", autopct="%1.1f%%"
)
plt.axis("equal")
plt.title("Tweets which are Real or Not")
plt.show()
# **NLP**
# Preprocessing the text data in the train and test datasets.
tweet_train["text"][:3]
# The above output shows the text column, which needs pre-processing: convert the words to lowercase,
# remove punctuation and stopwords, and tokenize the text into words.
stopword = stopwords.words("english")


def text_processing(text):
    text = re.sub(r"[^\w\d\s]+", "", text)
    text = text.lower()
    tok = nltk.word_tokenize(text)
    words = [word for word in tok if word not in stopword]
    return words


def join_words(words):
    words = " ".join(words)
    return words


# preprocess the train text data
tweet_train["text_pre"] = tweet_train["text"].apply(lambda x: text_processing(x))
tweet_train["text"] = tweet_train["text_pre"].apply(lambda x: join_words(x))
# preprocess the test text data
tweet_test["text_pre"] = tweet_test["text"].apply(lambda x: text_processing(x))
tweet_test["text"] = tweet_test["text_pre"].apply(lambda x: join_words(x))
# Look at the train dataframe after the text is preprocessed.
tweet_train.head(3)
# Let us convert the words into vectors using the NLP CountVectorizer method. This vector is used for training the model.
wrd_vec = CountVectorizer() word_vector = wrd_vec.fit_transform(tweet_train["text"]) test_vector = wrd_vec.transform(tweet_test["text"]) # Let us see the sample submission file target.head(3) tar = target["target"] # **Prediction model** # Logistic Regression clf = LogisticRegression(C=1.0) clf.fit(word_vector, tweet_train["target"]) # Predict the tweets for the test data pred = clf.predict(test_vector) log_score = cross_val_score(clf, word_vector, tweet_train["target"], cv=3) print(log_score) # Random Forest Classifier ran_model = RandomForestClassifier( n_estimators=150, bootstrap=True, max_features="sqrt" ) ran_model.fit(word_vector, tweet_train["target"]) ran_pred = ran_model.predict(test_vector) ran_score = cross_val_score(ran_model, word_vector, tweet_train["target"], cv=3) print(ran_score) target["target"] = ran_pred target.head() # submission file sub_file = pd.DataFrame({"ID": target["id"], "target": target["target"]}) sub_file.to_csv("tweet_submission_file.csv", index=False)
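# The cross_val_score calls above use the default scoring (accuracy). Below is a small sketch of
# scoring the same models with F1 instead, on the assumption that F1 is the metric of interest
# for this competition.
log_f1 = cross_val_score(clf, word_vector, tweet_train["target"], cv=3, scoring="f1")
ran_f1 = cross_val_score(
    ran_model, word_vector, tweet_train["target"], cv=3, scoring="f1"
)
print("Logistic Regression F1:", log_f1.mean())
print("Random Forest F1:", ran_f1.mean())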
import pandas as pd import numpy as np import seaborn as sns from sklearn import preprocessing import matplotlib.pyplot as plt dc = pd.read_csv("/kaggle/input/santa-2022/image.csv") ds = pd.read_csv("/kaggle/input/santa-2022/sample_submission.csv") print(dc) print(ds) dc.head() ds.head() dc.shape ds.shape dc.dtypes ds.dtypes dc.isnull().sum() ds.isnull().sum() dc.info() ds.info() dc.describe() ds.describe() cc = dc.corr() print(cc) cs = ds.corr() print(cs) sns.heatmap(cc) sns.heatmap(cc, annot=True) sns.pairplot(dc) dc.to_csv("image.csv") ds.to_csv("sample_submission.csv")
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import tensorflow as tf from tensorflow import keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import ( Dense, InputLayer, Flatten, BatchNormalization, Dropout, ) from tensorflow.keras.utils import to_categorical from tensorflow.keras.regularizers import l2 from tensorflow.keras.layers import Conv2D, MaxPooling2D from tensorflow.keras.preprocessing.image import ImageDataGenerator import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns print(tf.__version__) train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv") print("Training Data shape: ", train_data.shape) print("Test Data shape: ", test_data.shape) train_data X_train = train_data.iloc[:, 1:].values.reshape(-1, 28, 28, 1) y_train = train_data.iloc[:, :1].values X_test = test_data.values.reshape(-1, 28, 28, 1) plt.imshow(X_train[0]) # Normalise data X_train = X_train / 255 X_test = X_test / 255 # One hot encode y_train_cat = to_categorical(y_train, 10) X_train.shape[1:] # build a simple model, adding additional Conv2D/MaxPooling2D layers may improve accuracy model = tf.keras.models.Sequential( [ tf.keras.layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D((2, 2)), tf.keras.layers.Conv2D(64, (3, 3), activation="relu"), tf.keras.layers.MaxPooling2D((2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(64, activation="relu"), tf.keras.layers.Dense(10, activation="softmax"), ] ) model.compile("Adam", "categorical_crossentropy", metrics=["accuracy"]) model.fit(X_train, y_train_cat, epochs=3, batch_size=128) # Make the predictions using the Test, unseen data after training predictions = model.predict(X_test).argmax(axis=1) submission = pd.DataFrame(predictions, columns=["Label"]) submission.index.name = "ImageId" submission = submission.rename(columns={0: "Label"}).reset_index() submission["ImageId"] = submission["ImageId"] + 1 submission.head() submission.to_csv("submission.csv", index=False) print("Submission complete")
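# ImageDataGenerator is imported above but never used. Below is a minimal augmentation sketch as
# one possible way to squeeze out extra accuracy; the augmentation ranges and the extra epoch
# count are assumptions, not values specified above.
datagen = ImageDataGenerator(
    rotation_range=10,  # small random rotations
    zoom_range=0.1,  # slight zoom in/out
    width_shift_range=0.1,  # small horizontal shifts
    height_shift_range=0.1,  # small vertical shifts
)
augmented_flow = datagen.flow(X_train, y_train_cat, batch_size=128)
model.fit(augmented_flow, epochs=3, steps_per_epoch=len(X_train) // 128)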
# # Setup import datetime from typing import Dict, List import numpy as np import pandas as pd from sklearn.preprocessing import LabelEncoder np.set_printoptions(precision=3) pd.set_option("display.precision", 3) company_df = pd.read_csv("app/resources/Client_Master.csv", dtype=str) company_df.head() detail_df = pd.read_excel("app/resources/company_data_document.xlsx", dtype=str) detail_df.columns = detail_df.iloc[0] detail_df = detail_df[1:] detail_df = detail_df[detail_df["Is Use - Manual"] == "True"] detail_df.reset_index(drop=True, inplace=True) detail_df.head() # # Preprocessing # - Fill null with default value: corresponding type # - Convert type: int, date, string # - Fill null by algorithm: min, max, frequency, encoder # - Handle wrong date # - Drop null # - Drop column have many unique value # ## Drop null company_na_count = company_df.isna().sum() company_na_count = company_na_count * 100 / len(company_df) company_df = company_df.loc[:, company_na_count < 95] company_df.head() # ## Drop many unique value # number_unique_value = {} # # for column in company_df.columns: # if len(company_df[column].unique()) < len(company_df) * 90 / 100: # number_unique_value[column] = len(company_df[column].unique()) # # number_unique_value = dict( # sorted(number_unique_value.items(), key=lambda x: x[1], reverse=True) # ) # # company_df = company_df.loc[:, number_unique_value.keys()] # company_df.head() company_df = company_df[detail_df["Japanese Column Name"].tolist()] company_df.head() # ## Fill null by default and convert to original type mapping_original_column_to_type = {} mapping_target_column_to_type = {} for x in detail_df.iloc: mapping_original_column_to_type[x["Japanese Column Name"]] = x["Column Type"] mapping_target_column_to_type[x["Japanese Column Name"]] = x["Target Type"] set(mapping_original_column_to_type.values()) default_string_value = "<empty>" string_columns = [] for column in company_df.columns: if mapping_original_column_to_type[column] == "STRING": company_df[column] = company_df[column].fillna(default_string_value) company_df[column] = company_df[column].astype(str) string_columns.append(column) if mapping_original_column_to_type[column] == "INT": company_df[column] = company_df[column].astype(float) # ### Simple Label Encoder label_encoders: Dict[str, LabelEncoder] = dict.fromkeys(string_columns) for column in label_encoders.keys(): label_encoders[column] = LabelEncoder() company_df[column] = label_encoders[column].fit_transform(company_df[column]) # ### Fill null by top k frequency def get_median_top_frequency_from_series(series_data: pd.Series, k: int = 10): """Get mean bottom from series.""" return sorted(series_data.value_counts().head(k).index)[k // 2] median_frequency_columns: Dict[int, List[str]] = { 1: [ "資本金(千円)", "前々期業績税引後利益(千円)", "前々期業績自己資本比率(%)", "前期業績税引後利益(千円)", "前期業績自己資本比率(%)", "最新期業績税引後利益(千円)", "最新期業績自己資本比率(%)", "事業所数", ], 10: [ "前期業績売上高(百万円)", "評点", "全国社数", "全国ランキング", "評点", "全国社数", "全国ランキング", "都道府県別社数", "都道府県別ランキング", "前々期業績売上高(百万円)", "最新期業績売上高(百万円)", "代表者生年", ], 8: [ "従業員数", ], 4: [ "株主数", ], } for k, columns in median_frequency_columns.items(): for column in columns: fill_value = get_median_top_frequency_from_series(company_df[column], k) company_df[column] = company_df[column].fillna(fill_value) company_df[column] = company_df[column].astype(int) date_columns = [ "最終コンタクト日", "【DM】最新の発送日", "代表者生年月日", "前々期業績決算期", "前期業績決算期", "COSMOS2更新年月日", ] for column in date_columns: fill_value = get_median_top_frequency_from_series(company_df[column], 1) 
company_df[column] = company_df[column].fillna(fill_value) company_df.isna().sum().sort_values(ascending=False) # ### Convert string to pandas date YEAR_FLAG_MAPPING = { "明治": 1868, "大正": 1912, "昭和": 1927, "平成": 1990, "令和": 2019, } def preprocessing_japanese_special_date(jp_date_string: str): """Preprocessing Japanese Special Date.""" def corresponding_year(year_flag_str: str): """.""" if year_flag_str in jp_date_string: if "元年" in jp_date_string: return jp_date_string.replace("明治元年", YEAR_FLAG_MAPPING[year_flag_str]) si = jp_date_string.find(year_flag_str) ei = jp_date_string.find("年") year_jp_number = jp_date_string[si:ei] if ei > si else jp_date_string[si:] year_jp_number = int(year_jp_number.replace(flag, "")) return jp_date_string.replace( f"{year_flag_str}{year_jp_number}", str(year_jp_number - 1 + YEAR_FLAG_MAPPING[flag]), ) for flag in YEAR_FLAG_MAPPING.keys(): if flag in jp_date_string: return corresponding_year(flag) return jp_date_string def japanese_date_to_standard_date(japanese_date_string: str): """Japanese Date to standard date. Convert 2018年3月 -> 03/01/2018. """ japanese_date_string = preprocessing_japanese_special_date(japanese_date_string) if "年" in japanese_date_string: japanese_date_string = japanese_date_string.split("年") year = japanese_date_string[0] month = japanese_date_string[1][:-1] return f"{month}/01/{year}" if "-" in japanese_date_string: japanese_date = japanese_date_string.split("-") return f"{japanese_date[1]}/{japanese_date[2]}/{japanese_date[0]}" return japanese_date_string date_columns = [ "前々期業績決算期", "前期業績決算期", ] datetime_columns = [ "登録日", "最終更新日", ] for column in datetime_columns: company_df[column] = company_df[column].apply(lambda x: x.split(" ")[0]) for column in date_columns + datetime_columns: company_df[column] = company_df[column].apply( lambda x: japanese_date_to_standard_date(x) ) company_df[column] = pd.to_datetime(company_df[column]) # ### Convert int to date def convert_int_date(int_value: int) -> datetime.date: """Convert int to date.""" date_obj = datetime.datetime(1899, 12, 30) + datetime.timedelta(days=int_value) new_date_format = date_obj.strftime("%-m/%-d/%Y") return new_date_format int2date_columns = [ "最終コンタクト日", "【DM】最新の発送日", "代表者生年月日", ] for column in int2date_columns: company_df[column] = company_df[column].apply(lambda x: convert_int_date(x)) company_df[column] = pd.to_datetime(company_df[column]) # ## Drop not use not_use_columns = [ "COSMOS2更新年月日", ] company_df.drop(columns=not_use_columns, inplace=True) company_df # # Add new features # ## Handle date column and add new def encode(data, col, max_val): data[col + "_sin"] = np.sin(2 * np.pi * data[col] / max_val) data[col + "_cos"] = np.cos(2 * np.pi * data[col] / max_val) return data all_date_columns = [] for column in company_df.columns: if str(company_df[column].dtype) == "datetime64[ns]": all_date_columns.append(column) for column in all_date_columns: company_df[f"day_{column}"] = company_df[column].dt.day company_df[f"month_{column}"] = company_df[column].dt.month company_df[f"year_{column}"] = company_df[column].dt.year company_df = encode(company_df, f"day_{column}", 31) company_df = encode(company_df, f"month_{column}", 12) company_df.drop(columns=all_date_columns, inplace=True) company_df.head() # ## Scaler # # Export company_df.to_csv("app/resources/company_data_preprocessed.csv", index=False)
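# The "Scaler" section above was left empty. A minimal sketch of what it might contain is shown
# here (it would run before the export): a MinMaxScaler over all remaining numeric columns. The
# choice of scaler and of scaling every numeric column is an assumption, not something specified
# in the script above.
from sklearn.preprocessing import MinMaxScaler

numeric_columns = company_df.select_dtypes(include=[np.number]).columns
scaler = MinMaxScaler()
company_df[numeric_columns] = scaler.fit_transform(company_df[numeric_columns])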
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import seaborn as sns

sns.set()
from matplotlib.pylab import rcParams

rcParams["figure.figsize"] = 11, 5
data = pd.read_csv(
    "/kaggle/input/ps.csv", index_col="timestamp", parse_dates=True, dayfirst=True
)
data.head()
# Flag weekends vs. weekdays (weekday_name is gone from recent pandas, so use day_name())
data["Day"] = data.index.day_name()
data["Day"] = data["Day"].replace(
    {
        "Saturday": 1,
        "Sunday": 1,
        "Monday": 0,
        "Tuesday": 0,
        "Wednesday": 0,
        "Thursday": 0,
        "Friday": 0,
    }
)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import math

Xfeatures = ["sub_meter_1", "sub_meter_2", "Day"]
X = data[Xfeatures]
y = data["main_meter"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
reg = RandomForestRegressor(n_estimators=50, max_depth=30, n_jobs=-1, warm_start=True)
a = reg.fit(X_train, y_train)
training_accuracy = reg.score(X_train, y_train)
test_accuracy = reg.score(X_test, y_test)
rmse_train = np.sqrt(mean_squared_error(reg.predict(X_train), y_train))
rmse_test = np.sqrt(mean_squared_error(reg.predict(X_test), y_test))
print(
    "Training Accuracy = %0.3f, Test Accuracy = %0.3f, RMSE (train) = %0.3f, RMSE (test) = %0.3f"
    % (training_accuracy, test_accuracy, rmse_train, rmse_test)
)

# ## Durbin-Watson test for autocorrelation on the statsmodels multiple linear regression
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
import statsmodels.api as sm
import statsmodels.tsa.api as smt
import warnings

X_with_constant = sm.add_constant(X_train)
model = sm.OLS(y_train, X_with_constant)
results = model.fit()
results.params
print(results.summary())

# ### Autocorrelation check
# Durbin-Watson: 1.970, n = 26400
# A Durbin-Watson statistic close to 2 indicates little first-order autocorrelation in the
# residuals; values near 0 or 4 would point to strong positive or negative autocorrelation.
# As a follow-up, fit an autoregressive (AR) model directly on the main_meter series.
from pandas import Series
from matplotlib import pyplot
from statsmodels.tsa.ar_model import AR
from sklearn.metrics import mean_squared_error

J = data["main_meter"]  # the series to model (data.value is not a valid DataFrame attribute)
train, test = J[1 : len(J) - 7], J[len(J) - 7 :]
model = AR(train)
model_fit = model.fit()
print("Lag: %s" % model_fit.k_ar)
print("Coefficients: %s" % model_fit.params)
predictions = model_fit.predict(
    start=len(train), end=len(train) + len(test) - 1, dynamic=False
)
for t in range(len(predictions)):
    print("predicted=%f, expected=%f" % (predictions.iloc[t], test.iloc[t]))
error = mean_squared_error(test, predictions)
print("Test MSE: %.3f" % error)
pyplot.plot(test)
pyplot.plot(predictions, color="red")
pyplot.show()
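# For reference, a one-line sketch of computing the Durbin-Watson statistic directly from the
# fitted OLS residuals (statsmodels also reports it in the summary printed above).
from statsmodels.stats.stattools import durbin_watson

dw = durbin_watson(results.resid)
print("Durbin-Watson statistic: %.3f" % dw)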
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # Step 1: Import data analysis modules # for basic mathematics operation import numpy as np import pandas as pd from pandas import plotting # for visualizations import matplotlib.pyplot as plt import seaborn as sns plt.style.use("fivethirtyeight") # for interactive visualizations import plotly.offline as py from plotly.offline import init_notebook_mode, iplot import plotly.graph_objs as go from plotly import tools init_notebook_mode(connected=True) import plotly.figure_factory as ff # for path import os # importing the dataset # Step 2 : Data import input_file = pd.read_csv( "/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv" ) input_file.head(5) # checking if there is any NULL data input_file.isnull().any().any() input_file.dtypes import warnings warnings.filterwarnings("ignore") plt.rcParams["figure.figsize"] = (18, 8) plt.subplot(1, 2, 1) sns.set(style="whitegrid") sns.distplot(input_file["MonthlyCharges"]) plt.title("Distribution of Monthly Charges", fontsize=20) plt.xlabel("Range of Monthly Charges") plt.ylabel("Count") labels = ["Female", "Male"] size = input_file["gender"].value_counts() colors = ["green", "orange"] explode = [0, 0.1] plt.rcParams["figure.figsize"] = (9, 9) plt.pie( size, colors=colors, explode=explode, labels=labels, shadow=True, autopct="%.2f%%" ) plt.title("Gender", fontsize=20) plt.axis("off") plt.legend() plt.show() x = input_file.iloc[:, [3, 4]].values # let's check the shape of x print(x.shape) # Hierarchial Clustering**** import scipy.cluster.hierarchy as sch dendrogram = sch.dendrogram(sch.linkage(Data, method="ward")) plt.title("Dendrogam", fontsize=20) plt.xlabel("MonthlyCharges") plt.ylabel("tenure") plt.show() from sklearn.cluster import AgglomerativeClustering hc = AgglomerativeClustering(n_clusters=5, affinity="euclidean", linkage="ward") y_hc = hc.fit_predict(Data) Data = np.array( Data ) # Else ValueError: could not convert string to float: '7590-VHVEG' plt.scatter(Data[y_hc == 0, 0], Data[y_hc == 0, 1], s=100, c="pink", label="miser") plt.scatter(Data[y_hc == 1, 0], Data[y_hc == 1, 1], s=100, c="yellow", label="general") plt.scatter(Data[y_hc == 2, 0], Data[y_hc == 2, 1], s=100, c="cyan", label="target") plt.scatter( Data[y_hc == 3, 0], Data[y_hc == 3, 1], s=100, c="magenta", label="spendthrift" ) plt.scatter(Data[y_hc == 4, 0], Data[y_hc == 4, 1], s=100, c="orange", label="careful") plt.scatter( km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=50, c="blue", label="centeroid", ) plt.style.use("fivethirtyeight") plt.title("Hierarchial Clustering", fontsize=20) plt.ylabel("tenure") plt.legend() plt.grid() plt.show() # Elbow Method**** from sklearn.cluster import KMeans wcss = [] for i in range(1, 11): km = KMeans(n_clusters=i, max_iter=300, n_init=10, random_state=0) km.fit(Data) wcss.append(km.inertia_) plt.plot(range(1, 11), wcss) plt.title("The Elbow Method", fontsize=20) plt.xlabel("No. 
of Clusters") plt.ylabel("wcss") plt.show() Data = input_file.iloc[:, [5, 18]] # Conversion of datatype float(18) to int kmeans = KMeans(n_clusters=4, init="k-means++", max_iter=300, n_init=10, random_state=0) ymeans = kmeans.fit_predict(x) plt.rcParams["figure.figsize"] = (10, 10) plt.title("Cluster of Ages", fontsize=30) plt.scatter( x[ymeans == 0, 0], x[ymeans == 0, 1], s=100, c="pink", label="Usual Customers" ) plt.scatter( x[ymeans == 1, 0], x[ymeans == 1, 1], s=100, c="orange", label="Priority Customers" ) plt.scatter( x[ymeans == 2, 0], x[ymeans == 2, 1], s=100, c="lightgreen", label="Target Customers(Young)", ) plt.scatter( x[ymeans == 3, 0], x[ymeans == 3, 1], s=100, c="red", label="Target Customers(Old)" ) plt.scatter( kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=50, c="black" ) plt.style.use("fivethirtyeight") plt.xlabel("MonthlyCharges") plt.ylabel("tenure") plt.legend() plt.grid() plt.show()
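# The elbow plot above is one way to pick the number of clusters; the silhouette score is a
# complementary check. A small sketch, assuming the two numeric columns used for clustering above
# are tenure and MonthlyCharges from input_file:
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# Silhouette score lies in [-1, 1]; higher values mean tighter, better-separated clusters.
cluster_data = input_file[["tenure", "MonthlyCharges"]].values
for k in range(2, 8):
    labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(cluster_data)
    print(k, round(silhouette_score(cluster_data, labels), 3))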
import numpy as np import PIL.Image import glob import matplotlib.pyplot as plt import os from cv2 import resize import tqdm.auto as tqdm import cv2 import matplotlib def format(shape): x_position = range(0, shape[1], 125) x_label = [str(x // 5) for x in x_position] y_position = np.arange(0, shape[0], 125) y_label = [str(y // 5) for y in y_position] y_position = shape[0] - y_position plt.xticks(x_position, x_label) plt.yticks(y_position, y_label) plt.grid(linestyle="--", color="gray") plt.ylabel("Range distance to origin [km]") plt.xlabel("Azimuth distance to origin [km]") def colormap(key, color="gray"): cmap = matplotlib.cm.get_cmap(key).copy() cmap.set_bad(color=color) cmap.set_under(color=color) return cmap folders = sorted(glob.glob("/kaggle/input/medisar*/*/*/*"), reverse=True) i = 0 pbar = tqdm.tqdm(folders, smoothing=0.001) for folder in pbar: content = os.listdir(folder) if "era5_wind_speed_256.png" not in content: continue wind = np.array(PIL.Image.open(folder + "/era5_wind_speed_256.png")) / 10 mean_wind = np.nanmean(wind[wind > 0]) # if mean_wind > 6: continue mask = np.array(PIL.Image.open(folder + "/mask.png")).astype("float") if mask.mean() > 0.5: continue # Removing example with too much land rain = np.array(PIL.Image.open(folder + "/rain.png")) if rain[:, :, 2][rain[:, :, 3] != 0].mean() < 0.002 * 255: continue # Remove example with too low rain sar = np.array(PIL.Image.open(glob.glob(folder + "/*vv*")[0])) sar_vh = np.array(PIL.Image.open(glob.glob(folder + "/*vh*")[0])) rain = resize(rain, sar.shape[::-1], interpolation=cv2.INTER_NEAREST) rain[rain[:, :, 3] < 128] = (128, 128, 128, 255) plt.figure(figsize=(20, 6)) plt.suptitle(os.path.split(folder)[1]) plt.subplot(131) plt.imshow(sar, cmap="gray", vmin=0, vmax=2**15) format(sar.shape) plt.subplot(132) plt.imshow(sar_vh, cmap="gray", vmin=0, vmax=2**12) format(sar.shape) plt.subplot(133) plt.imshow(rain) format(sar.shape) plt.tight_layout() plt.show() plt.close() i += 1 if i == 50: break i = 0 max_slicks = 0 pbar = tqdm.tqdm(folders, smoothing=0.001) for folder in pbar: content = os.listdir(folder) if "era5_wind_speed_256.png" not in content: continue wind = np.array(PIL.Image.open(folder + "/era5_wind_speed_256.png")) / 10 if wind[wind > 0].mean() > 5: continue # Remove example with too luch wind mask = np.array(PIL.Image.open(folder + "/mask.png")).astype("float") if mask.mean() > 0.5: continue # Removing example with too much land slicks = np.array(PIL.Image.open(folder + "/biological_slicks.png")) / 255 slicks[slicks < 0.75] = 0 max_slicks = max(max_slicks, slicks.mean()) pbar.set_description(f"{max_slicks}") if slicks.mean() < 0.03: continue # Remove example with too low slicks sar = np.array(PIL.Image.open(glob.glob(folder + "/*vv*")[0])) slicks = resize(slicks, sar.shape[::-1], interpolation=cv2.INTER_NEAREST) slicks[resize(mask, slicks.shape[::-1]) > 0.5] = np.nan plt.figure(figsize=(20, 8)) plt.suptitle(os.path.split(folder)[1]) plt.subplot(121) plt.imshow(sar, cmap="gray", vmin=0, vmax=2**12) plt.colorbar(orientation="horizontal", fraction=0.046) format(sar.shape) plt.subplot(122) plt.imshow(slicks, cmap=colormap("gray"), vmin=-(10**-5), vmax=1) plt.colorbar(orientation="horizontal", fraction=0.046) format(sar.shape) plt.tight_layout() plt.show() plt.close() i += 1 if i == 50: break i = 0 pbar = tqdm.tqdm(folders, smoothing=0.001) for folder in pbar: content = os.listdir(folder) if "era5_wind_speed_256.png" not in content: continue wind = np.array(PIL.Image.open(folder + "/era5_wind_speed_256.png")) / 10 
mean_wind = np.nanmean(wind[wind > 0]) if mean_wind < 9: continue if mean_wind > 12: continue mask = np.array(PIL.Image.open(folder + "/mask.png")).astype("float") convection = np.array(PIL.Image.open(folder + "/convection.png")) / 255 if convection.mean() < 0.05: continue # Remove example with too low convection if convection.mean() > 0.1: continue # Remove example with too much convection (probably false alarm) sar = np.array(PIL.Image.open(glob.glob(folder + "/*vv*")[0])) convection = resize(convection, sar.shape[::-1], interpolation=cv2.INTER_NEAREST) convection[resize(mask, convection.shape[::-1]) > 0.5] = np.nan plt.figure(figsize=(20, 8)) plt.suptitle(os.path.split(folder)[1]) plt.subplot(121) plt.imshow(sar, cmap="gray", vmin=0, vmax=2**15) plt.colorbar(orientation="horizontal", fraction=0.046) format(sar.shape) plt.subplot(122) plt.imshow(convection, cmap=colormap("gray"), vmin=-(10**-5), vmax=1) plt.colorbar(orientation="horizontal", fraction=0.046) format(sar.shape) plt.tight_layout() plt.show() plt.close() i += 1 if i == 50: break
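# The colormap helper defined at the top of this script relies on matplotlib's set_bad/set_under
# so that masked (NaN) pixels such as land render in a fixed colour instead of being mapped.
# A minimal, self-contained sketch of that behaviour on a toy array:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt

toy = np.linspace(0, 1, 25).reshape(5, 5)
toy[0, :] = np.nan  # pretend the first row is masked land

cmap = matplotlib.cm.get_cmap("viridis").copy()
cmap.set_bad(color="gray")  # NaN pixels are drawn in gray

plt.imshow(toy, cmap=cmap, vmin=0, vmax=1)
plt.colorbar()
plt.show()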
# Analysis of Pokémon Database # Creator: alopez247 # Notebook Author: João Paulo Ribeiro dos Santos (joaopauloribsantos) # This notebook is intended to apply some concepts and methods that I am learning. For this reason, that it will be updated frequently, until I can answer two questions that have always puzzled me about pokemons: # # What are the ten most powerful pokemons? And how is the distribution of their stats? # Which attributes stands out most in my favorite pokemon? # # # Libraries Import # The following code expresses the main libraries that we will use on this notebook import numpy as np import pandas as pd import sklearn as sk import matplotlib.pyplot as plt import seaborn as sns from matplotlib import rc from matplotlib.colors import ListedColormap from sklearn import preprocessing # Let's Go # Notebook Configurations pd.set_option("display.max_columns", None) # Database Import # In the code below we are creating a dataframe from the pokemon database df_pokemon = pd.read_csv("../input/pokemon/pokemon_alopez247.csv") # Dataframe Operations/ Analysis # Dataframe dimension df_pokemon.shape # 721 Rows and 23 Columns # Columns of dataframe df_pokemon.columns # Columns Types df_pokemon.dtypes # Viewing the 3 first rows df_pokemon.head(3) # Viewing the 3 last rows df_pokemon.tail(3) # The column 'Number' on the dataframe is like the pokemon ID, so we can consider it the index of the dataframe df_pokemon.set_index(["Number"], inplace=True) # Handling Null/ Nan Values # Verifying if exists NAN values on dataset null_columns = df_pokemon.columns[df_pokemon.isnull().any()] df_pokemon[null_columns].isnull().sum() # Percentage of Null/ Nan Values per column in dataframe df_pokemon[null_columns].isnull().sum() * 100 / len(df_pokemon) # Creating a HeatMap plot to show the null of all values in the entire dataframe plt.figure(figsize=(20, 10)) pl = sns.heatmap(df_pokemon.isnull(), cmap="Greens", cbar=False) pl.set_xticklabels(pl.get_xticklabels(), rotation=30) plt.show() # Veifying types of data in column Type_2 df_pokemon["Type_2"].value_counts(dropna=False) # Veifying types of data in column Pr_Male df_pokemon["Pr_Male"].value_counts(dropna=False) # Veifying types of data in column Egg_Group_2 df_pokemon["Egg_Group_2"].value_counts(dropna=False) # According to website bulbapedia (https://bulbapedia.bulbagarden.net/wiki/Egg_Group): ## Egg Groups are categories which determine which Pokémon are able to interbreed. ## The concept was introduced in Generation II, along with breeding. Similar to types, ## a Pokémon may belong to either one or two Egg Groups # Replacing null values df_pokemon["Egg_Group_2"].fillna("Undiscovered", inplace=True) # According to website bulbapedia (https://bulbapedia.bulbagarden.net/wiki/%3F%3F%3F_(type): ## The ??? type is a type that exists only in Generations II, III, and IV. ## It was removed in the Generation V games and has not returned. 
# Replacing null values df_pokemon["Type_2"].fillna("???", inplace=True) # Checking more about the column 'Pr_Male' # Pr_Male = Probability of a pokemon being male df_pokemon[df_pokemon["Pr_Male"].isnull()].loc[ :, ["Name", "Type_1", "Type_2", "isLegendary", "hasGender"] ] # The pokemons that hasn't gender are the same pokemons who doesn't have probability of being male df_pokemon[df_pokemon["hasGender"] == False & df_pokemon["Pr_Male"].isnull()].loc[ :, ["Name", "Type_1", "Type_2", "isLegendary", "hasGender"] ] df_pokemon.columns # As the variable 'Pr_Male' is totally dependent on the variable 'Has_Gender', # and until that moment there is no reason to consider it in the analysis, # that the variable will be disregarded in a new dataframe. # Creating the new Dataframe df_pokemon_an_01 = df_pokemon.drop(["Pr_Male"], axis=1) df_pokemon_an_01.head(3) # Exploratory Data Analysis (EDA) df_pokemon_eda = df_pokemon_an_01.drop(["Name"], axis=1) # Show the main dataframe statiscs df_pokemon_eda.describe() # The variables 'Name' and describes only categorical pokemon characteristics, # as well as the number of football player's shirts # The previous code showed the boxplot of all variables / columns. plt.figure(figsize=(20, 15)) sns.boxplot(data=df_pokemon_eda) plt.show() # Generating a table with the correlation of all variables df_pokemon_eda[ [ "Total", "HP", "Attack", "Defense", "Sp_Atk", "Sp_Def", "Speed", "Generation", "Height_m", "Weight_kg", "Catch_Rate", ] ].corr() # As we saw earlier, the correlation between some variables is significantly weak, however, # there are some columns with a relatively high correlation, such as 'Total' and 'Attack.' sns.lmplot(x="Attack", y="Total", data=df_pokemon_eda) plt.show() # Detecting the outliers # In this section, we will create a function that returns the main data related to outliers def fn_validate_catching_outliers_values(p_df_dataframe, p_column): """ Description: Validates information related to the dataframe and its column, before proceeding with the function 'fn_catching_outliers'. Keyword arguments: p_df_dataframe -- the dataframe p_column -- the dataframe column Return: None Exception: Validates that the dataframe is empty; Validates whether the column exists on the dataframe; Validates whether the column is a numeric type """ if p_df_dataframe.empty: raise Exception("The dataframe is empty") if p_column not in p_df_dataframe.columns: raise Exception("The column does not exist in the dataframe") if not np.issubdtype(p_df_dataframe[p_column].dtype, np.number): raise Exception("The informed column doesn't have the numeric type.") def fn_catching_outliers(p_df_dataframe, p_column): """ Description: Function that locates outliers in an informed dataframe. Keyword arguments: p_df_dataframe -- the dataframe p_column -- the dataframe column Return: df_with_outliers -- Dataframe with the outliers located df_without_outliers -- Dataframe without the outilers Exception: None """ # Check if the information passed is valid. 
    fn_validate_catching_outliers_values(p_df_dataframe, p_column)

    # Calculate the first and the third quartile of the column
    quartile_1, quartile_3 = np.percentile(p_df_dataframe[p_column], [25, 75])

    # Calculate the interquartile range (IQR = Q3 - Q1)
    iqr = quartile_3 - quartile_1

    # Generating the fence high and low values
    fence_high = quartile_3 + (1.5 * iqr)
    fence_low = quartile_1 - (1.5 * iqr)

    # And finally we are generating two dataframes, one with the outlier values
    # (outside the fences) and the second with the values within the fences
    df_with_outliers = p_df_dataframe[
        (p_df_dataframe[p_column] < fence_low) | (p_df_dataframe[p_column] > fence_high)
    ]
    df_without_outliers = p_df_dataframe[
        (p_df_dataframe[p_column] >= fence_low) & (p_df_dataframe[p_column] <= fence_high)
    ]

    if df_with_outliers.empty:
        print("No outliers were detected.")

    return df_with_outliers, df_without_outliers


df_pokemon_out, _ = fn_catching_outliers(df_pokemon_eda, "Attack")
df_pokemon_out.head(3)
# To provide greater accuracy to the model, it will be necessary to apply some statistical methods to
# the categorical variables, such as 'dummies', 'label encoding', etc ...
# Identify the amount of unique data per non-numeric column.
df_pokemon_eda[df_pokemon_eda.select_dtypes(exclude=np.number).columns].nunique()
# Given that the categorical variables / columns have more than 10 different types of values,
# it will be necessary to apply the scikit-learn method / function, label encoding.
encoder = preprocessing.LabelEncoder()
categorical_columns = [
    "Type_1",
    "Type_2",
    "Color",
    "Egg_Group_1",
    "Egg_Group_2",
    "Body_Style",
]
for col in categorical_columns:
    # Assign the encoded array directly so it aligns with the existing index
    df_pokemon_eda["encoder_" + col] = encoder.fit_transform(df_pokemon_eda[col])
df_pokemon_eda.head(3)
# An interesting point to highlight is Catch_Rate, which corresponds to the chances of capturing a pokemon,
# which varies from 3 to 245, and it is often not clear whether the pokemon is really
# difficult or not to be captured. For this reason, it will be necessary to convert this data into a percentage
df_pokemon_eda["Catch_Rate"] = (df_pokemon_eda["Catch_Rate"] * 100) / 245
# Generating a table with the correlation of all variables
df_pokemon_correlation = df_pokemon_eda[
    df_pokemon_eda.select_dtypes(exclude=["object"]).columns
].corr()
df_pokemon_correlation.columns
df_pokemon_correlation
# The table above contains information on the variables that most correlate.
# Next, a heatmap will be created so that we can see this correlation in a more interesting way.
mask_pk = np.zeros_like(df_pokemon_correlation, dtype=np.bool) mask_pk[np.triu_indices_from(mask_pk)] = True plt.figure(figsize=(22, 18)) heat_map = sns.heatmap( df_pokemon_correlation, vmin=-1, cmap="coolwarm", annot=True, mask=mask_pk ) heat_map.set_xticklabels(heat_map.get_xticklabels(), rotation=35) plt.show() # Regarding the correlation, we can highlight: # - Given that the Total is the result of the sum of the variables, Attack, Defense, Sp_Atk and Sp_Def, the relatively high correlation between both is normal; # - There is a somewhat strong and negative correlation between Total and Catch_Rate, after all, most of the time, more powerful pokemons are more difficult to be captured; # - Strangely the correlation between Height_m and Weight_kg is not very strong, but just as there are tall people but with little weight, there are also pokemon with these characteristics, like Rayquasa # Creating a regression plot to analyze the Height_m and Weight_km variables sns.lmplot(x="Height_m", y="Weight_kg", data=df_pokemon_eda) plt.show() # What are the ten most powerful pokemons? And how is the distribution of their stats? df_pokemon_top_10_total = df_pokemon.sort_values(by="Total", ascending=False).head(10) df_pokemon_top_10_total.set_index(["Name"], inplace=True) df_pokemon_top_10_total.drop( columns=[ "Type_1", "Type_2", "Generation", "isLegendary", "Color", "hasGender", "Pr_Male", "Egg_Group_1", "Egg_Group_2", "hasMegaEvolution", "Height_m", "Weight_kg", "Catch_Rate", "Body_Style", "HP", "Speed", ], inplace=True, ) df_pokemon_top_10_total.transpose() # With the table above, it is already possible to answer which are the # 10 most powerful pokemons according to the sum of their stats. # Below is a code that gives us the same view of the table, but using bar graphs. 
# List with the pokemons names
lst_pokemons_names = df_pokemon_top_10_total.index.values.tolist()
# Arrays for each of the studied stats
np_top10_pokemons_attack = np.array(df_pokemon_top_10_total["Attack"].values.tolist())
np_top10_pokemons_defense = np.array(df_pokemon_top_10_total["Defense"].values.tolist())
np_top10_pokemons_sp_atk = np.array(df_pokemon_top_10_total["Sp_Atk"].values.tolist())
np_top10_pokemons_sp_def = np.array(df_pokemon_top_10_total["Sp_Def"].values.tolist())
# Array sums
snum = (
    np_top10_pokemons_attack
    + np_top10_pokemons_defense
    + np_top10_pokemons_sp_atk
    + np_top10_pokemons_sp_def
)
# Normalizing the data of the arrays
np_top10_pokemons_attack = np_top10_pokemons_attack / snum * 100.0
np_top10_pokemons_defense = np_top10_pokemons_defense / snum * 100.0
np_top10_pokemons_sp_atk = np_top10_pokemons_sp_atk / snum * 100.0
np_top10_pokemons_sp_def = np_top10_pokemons_sp_def / snum * 100.0
# Figure / graph size
plt.figure(figsize=(20, 20))
plt.title(
    "Distribution of the stats of the 10 most powerful pokemons",
    fontdict={"fontsize": 36},
)
# Setting fonts and sizes
font = {"weight": "bold", "size": 20}
plt.rc("font", **font)
# Generating the bar graph of each stat (stacked in the order attack, defense, sp_atk, sp_def)
plt.bar(lst_pokemons_names, np_top10_pokemons_attack, label="Attack")
plt.bar(
    lst_pokemons_names,
    np_top10_pokemons_defense,
    bottom=np_top10_pokemons_attack,
    label="Defense",
)
plt.bar(
    lst_pokemons_names,
    np_top10_pokemons_sp_atk,
    bottom=np_top10_pokemons_attack + np_top10_pokemons_defense,
    label="Special Attack",
)
plt.bar(
    lst_pokemons_names,
    np_top10_pokemons_sp_def,
    bottom=np_top10_pokemons_attack + np_top10_pokemons_defense + np_top10_pokemons_sp_atk,
    label="Special Defense",
)
# Adding a text with the percentage of each stat in relation to the total amount;
# each label is centred in the middle of its own segment of the stacked bar
for xpos, ypos, yval in zip(
    lst_pokemons_names, np_top10_pokemons_attack / 2, np_top10_pokemons_attack
):
    plt.text(xpos, ypos, "%.1f" % yval + "%", ha="center", va="center")
for xpos, ypos, yval in zip(
    lst_pokemons_names,
    np_top10_pokemons_attack + np_top10_pokemons_defense / 2,
    np_top10_pokemons_defense,
):
    plt.text(xpos, ypos, "%.1f" % yval + "%", ha="center", va="center")
for xpos, ypos, yval in zip(
    lst_pokemons_names,
    np_top10_pokemons_attack + np_top10_pokemons_defense + np_top10_pokemons_sp_atk / 2,
    np_top10_pokemons_sp_atk,
):
    plt.text(xpos, ypos, "%.1f" % yval + "%", ha="center", va="center")
for xpos, ypos, yval in zip(
    lst_pokemons_names,
    np_top10_pokemons_attack
    + np_top10_pokemons_defense
    + np_top10_pokemons_sp_atk
    + np_top10_pokemons_sp_def / 2,
    np_top10_pokemons_sp_def,
):
    plt.text(xpos, ypos, "%.1f" % yval + "%", ha="center", va="center")
plt.ylim(0, 110)
plt.legend(bbox_to_anchor=(1.01, 0.5), loc="center left")
plt.show()
# Which attributes stand out most in my favorite pokemon?
# First of all, I must emphasize that I like dragons a lot, and since I was a child I have always
# loved pokemons with this style, so my favorite pokemon has always been Charizard
# Finding my favorite Pokémon
df_pokemon_favorite = df_pokemon.set_index(["Name"]).loc[
    ["Charizard"],
    ["Total", "HP", "Attack", "Defense", "Sp_Atk", "Sp_Def", "Speed", "Catch_Rate"],
]
df_pokemon_favorite["Catch_Rate"] = (df_pokemon_favorite["Catch_Rate"] * 100) / 245
df_pokemon_favorite
# Next we will compare the attributes of my favorite pokemon with the average of all other pokemon.
bar_width = 0.25 bars_pokemon = df_pokemon_favorite.values.tolist()[0] bars_mean_pokemon = np.array( df_pokemon_eda.loc[ :, ["Total", "HP", "Attack", "Defense", "Sp_Atk", "Sp_Def", "Speed", "Catch_Rate"], ] .mean() .values.tolist() ) str_pokemon_favorite_name = "".join(df_pokemon_favorite.index.format()) r1 = np.arange(len(bars_pokemon)) r2 = [x + bar_width for x in r1] plt.figure(figsize=(20, 20)) plt.title( "Comparison of the stats of the favorite pokemon with the average of all pokemon.", fontdict={"fontsize": 36}, ) plt.bar( r1, bars_pokemon, color="red", width=bar_width, edgecolor="white", label=str_pokemon_favorite_name, ) plt.bar( r2, bars_mean_pokemon, color="#557f2d", width=bar_width, edgecolor="white", label="Mean of All Pokemons", ) plt.xticks( [r + bar_width for r in range(len(bars_pokemon))], df_pokemon_favorite.columns.values.tolist(), ) plt.legend(bbox_to_anchor=(1.01, 0.5), loc="center left") plt.show()
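# Another way to see which attributes stand out for the favorite pokemon is to express each stat
# as a z-score against the whole population, rather than comparing raw values with the mean.
# A short sketch, assuming df_pokemon_favorite and df_pokemon_eda from above are still in scope:
stat_cols = ["HP", "Attack", "Defense", "Sp_Atk", "Sp_Def", "Speed"]

# z-score: how many standard deviations Charizard sits above or below the average pokemon
z_scores = (
    df_pokemon_favorite[stat_cols].iloc[0] - df_pokemon_eda[stat_cols].mean()
) / df_pokemon_eda[stat_cols].std()
print(z_scores.sort_values(ascending=False))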
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns pd.set_option("display.float_format", lambda x: "%.3f" % x) pd.set_option("display.max_rows", 1000) pd.set_option("display.max_columns", 1000) plt.rcParams["figure.figsize"] = (11, 5) train_df = pd.read_csv("/kaggle/input/titanic/train.csv", index_col=False) train_df.head(100) train_df.describe() train_df.isna().sum() train_df["GenderCode"] = train_df.Sex.astype("category").cat.codes train_df["FamilySize"] = train_df.SibSp + train_df.Parch + 1 train_df["hasCabin"] = ~train_df.Cabin.isna() train_df.Age.fillna( train_df.groupby(["Pclass", "Sex", "FamilySize"]).Age.transform("median"), inplace=True, ) train_df.Embarked.fillna(train_df.Embarked.mode()[0], inplace=True) train_df.isna().sum() def plot_feature(train_df, feature): survived_df = train_df[train_df.Survived == True][feature].value_counts() dead_df = train_df[train_df.Survived == False][feature].value_counts() counts_df = pd.DataFrame([survived_df, dead_df], index=["Survived", "Dead"]) counts_df.plot( kind="barh", stacked=True, legend=True, title="Frequency by " + feature ) (counts_df.div(counts_df.sum(axis=1), axis=0) * 100).plot( kind="barh", stacked=True, legend=True, title="Proportion by " + feature ) def plot_kde(train_df, split_feature, kde_feature): legend = sorted(train_df.dropna(subset=[split_feature])[split_feature].unique()) for key in legend: sns.kdeplot( train_df[train_df[split_feature] == key][kde_feature], shade=True ) # .plot(kind='kde',legend=True) plt.legend(tuple(legend)) plot_feature(train_df, "Sex") plot_feature(train_df, "Pclass") plot_feature(train_df, "Embarked") plot_feature(train_df, "FamilySize") plt.clf() plot_kde(train_df, "Pclass", "Fare") plt.show() plt.clf() plot_kde(train_df, "Pclass", "Age") plt.show() plt.rcParams["figure.figsize"] = (11, 5) plt.clf() plot_kde(train_df, "Embarked", "Fare") plt.show() # plt.clf() # plot_kde(train_df,'Embarked','Age') # plt.show() plt.rcParams["figure.figsize"] = (11, 5) plt.clf() plot_kde(train_df, "Sex", "Fare") plt.show() plt.clf() plot_kde(train_df, "Sex", "Age") plt.show() plt.rcParams["figure.figsize"] = (12, 8) survived_df = train_df[train_df.Survived == True] dead_df = train_df[train_df.Survived == False] sns.scatterplot( train_df.Age, train_df.Fare, hue=train_df.Survived, size=train_df.Fare, palette={0: "red", 1: "green"}, ) plt.rcParams["figure.figsize"] = (13, 10) sns.heatmap(train_df.corr(), annot=True) train_df["GenderAndClass"] = list(zip(train_df.Sex, train_df.Pclass)) train_df.sort_values(by="GenderAndClass", inplace=True) sns.violinplot( train_df.Sex, train_df.Age, hue=train_df.Survived, split=True, palette={0: "r", 1: "g"}, ) sns.violinplot( train_df.GenderAndClass, train_df.Age, hue=train_df.Survived, split=True, palette={0: "r", 1: "g"}, ) sns.violinplot( train_df.Embarked, train_df.Fare, hue=train_df.Survived, split=True, palette={0: "r", 1: "g"}, ) plt.rcParams["figure.figsize"] = (9, 6) train_df.groupby("Pclass").mean()[["Fare", "Age"]].plot( kind="bar", title="Mean Age and Fare by Class" ) plt.hist( [ train_df[(train_df.Survived == False)]["Fare"], train_df[(train_df.Survived == True)]["Fare"], ], color=["r", "g"], label=["Dead", "Survived"], stacked=True, bins=30, ) plt.title("Ticket Fare Histogram") plt.xlabel("Fare") plt.ylabel("Frequency") plt.legend() plt.hist( [train_df[train_df.Survived == False].Age, train_df[train_df.Survived == True].Age], color=["r", "g"], stacked=True, label=["Dead", "Survived"], bins=20, ) plt.title("Age Histogram") 
plt.xlabel("Age") plt.ylabel("Frequency") plt.legend() plt.hist( [ train_df[(train_df.Survived == False) & (train_df.Sex == "male")].Age, train_df[(train_df.Survived == True) & (train_df.Sex == "male")].Age, train_df[(train_df.Survived == False) & (train_df.Sex == "female")].Age, train_df[(train_df.Survived == True) & (train_df.Sex == "female")].Age, ], color=["red", "green", "indianred", "lime"], stacked=True, label=["Dead Male", "Rescued Male", "Dead Female", "Rescued Female"], bins=20, ) plt.title("Age Histogram") plt.xlabel("Age") plt.ylabel("Frequency") plt.legend() # Extract titles and do EDA # Tease info from # Figure out how to judge importance of features # Apply basic models # Learn how to clean NaNs by binning according to specific categories and then taking median train_df["Title"] = train_df.Name.apply(lambda x: x.split(",")[1].split(".")[0].strip())
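# The Title column extracted above contains many rare values (Dr, Rev, the Countess, ...); a common
# next step is to collapse them into a few buckets before encoding. A minimal sketch; the mapping
# below is one reasonable grouping rather than the only possible one:
title_map = {
    "Mlle": "Miss", "Ms": "Miss", "Mme": "Mrs",
    "Lady": "Rare", "the Countess": "Rare", "Capt": "Rare", "Col": "Rare",
    "Don": "Rare", "Dr": "Rare", "Major": "Rare", "Rev": "Rare",
    "Sir": "Rare", "Jonkheer": "Rare", "Dona": "Rare",
}
train_df["TitleGroup"] = train_df["Title"].replace(title_map)
# Survival rate per grouped title
print(train_df.groupby("TitleGroup").Survived.mean().sort_values(ascending=False))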
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns import scipy from scipy import stats from scipy.stats import norm, skew import warnings def ignore_warn(*args, **kwargs): pass warnings.warn = ignore_warn # ignore warnings from imported libraries # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Raditya Pratama # # # # Forecasting video game sales prediction using regression # The following model involves to **predict the sales outcome of a video game title in a given period of time**. It tries to forecast the copies sold in the observed regions across the world, predominantly from North America, Europe, Japan and etc... # **Scope of the problem addressed is how video game publishers can predict the value and profitability from making the sales in video games**, ranging from the features that affect the sales towards the audience liking, for example, how attractive the *name* is, what popular *platforms* do players use at that time, along with the important factor of *genre* which determines the users' liking on a certain game aspect and observing the global sales overtime to show which region does a specific video game have the high rating for the database worldwide - region varies due to the predominance of income, status and access to technology. # In this project, it will adopt the linear regression method to be applied in the imported dataset. Although the main issue is that there is no quantitative independent variables, therefore the categorical variables such as *platform, genre, publisher* must be converted to numeric first before proceeding. # The dataset description involves the registered list of video game titles with over 100,000 copies sold in multiple regions. # Variable fields include: # Rank - Ranking of overall sales # * Name - The games name # * Platform - Platform of the games release (Wii, Xbox, PS, PC) # # * Year - Year of the game's release # # * Genre - Genre of the game # # * Publisher - Publisher of the game # # * NA_Sales - Sales in North America (in millions) # # * EU_Sales - Sales in Europe (in millions) # # * JP_Sales - Sales in Japan (in millions) # # * Other_Sales - Sales in the rest of the world (in millions) # # * Global_Sales - Total worldwide sales. # * Critic_Score - given score from reviewers # * User_Score - given score from audiences who bought the game # * Developer - Developer of the game # * Rating - Age rating of the video game mentioned # # Initialization of preprocessing the data: # * Collect the data: gather the data on various features that affect the sale of the video game, such as the platform used (Switch, Xbox, PS), its rating, its publisher and marketing budget. # * Prepare the data: clean the data and remove the missing values or outliers. 
Then to encode it into numerical variables (preprocessing with scikit) using one-hot encoding/label encoding # * Split the data: data will be splut into training and testing sets, the training data is used to train the regression model and the testing set will be used to evaluate performance # * Regression model selection: **random forest regression and support vector regression** will be used in this project to *predict global sales* # * Train the model: Fit the regression model into training data and tune the hyperparameters like cross validation # * Evaluate the model: Mean squared error or R squared can be used to evaluate the performance # * Make predictions: use the trained model to predict new video game title sales forecast and what factors driven to make the sales increase # # Data preprocessing steps: # Continuation of the discussion based on platform, genre and publisher, it will be solved by the following method: # To solve this, the model will use the label encoding method for handling the categorical variables aforementioned; each label will be assigned with a unique integer based on the alphabetical ordering so it can be read by the machine learning model for preprocessing in supervised learning. # Therefore, the first step of the preprocessing is to use the label encoding from sklearn library to replace the categorical value between 0 and the number of classes -1. 0 and n-1 # the proposed step: # * from sklearn.preprocessing import LabelEncoder # Methodology used in data preprocessing: # ![methodology video game prediction.drawio (2).png](attachment:a1b1ce6c-25a2-4926-bee7-e7b497e0c452.png) # Data preparation and analysis: # Statistical aspects: # Check data dimension # Rows, columns and column names # Data types # Cleaning the data # look for missing data and outliers # identify and convert the categorical values to numerical representation # Statistical calculation: # find relationship of columns and how it affects # check correlation and chi square # correlation relates to numerical columns and chi square relates to categorical columns # Graphical representation of data: # Perform visualization on dataset data = pd.read_csv( "/kaggle/input/video-games-sales-as-at-22-dec-2016csv/Video_Games_Sales_as_at_22_Dec_2016.csv" ) data1 = data.copy() display(data1.head()) display(data1.tail()) # given 16 columns: # * global sales will be taken as the variable for dependent variable - the ones we are trying to predict # * independent variables - will either be the critic or the user score: this defines the liking of a certain user data1.info() print(data.shape) # identifying outliers / missing data fig, ax = plt.subplots() ax.scatter(x=data["Critic_Score"], y=data["Global_Sales"]) plt.ylabel("Global_Sales", fontsize=13) plt.xlabel("Critic_Score", fontsize=13) plt.show() # on the global sales y axis, there is one depicted outlier on y value of 80 - we need to eradicate it data = data.drop(data[(data["Critic_Score"] > 60) & (data["Global_Sales"] > 60)].index) print(data) # identifying outliers / missing data fig, ax = plt.subplots() ax.scatter(x=data["Critic_Score"], y=data["Global_Sales"]) plt.ylabel("Global_Sales", fontsize=13) plt.xlabel("Critic_Score", fontsize=13) plt.show() # outlier(s) removed # Check distribution of dependent variable, target dependency the user needs to predict is the *Global_Sales* variable sns.distplot(data["Global_Sales"], fit=norm) # Get the fitted parameters used by the function (mu, sigma) = norm.fit(data["Global_Sales"]) print("\n mu = {:.2f} and sigma 
= {:.2f}\n".format(mu, sigma)) # Now plot the distribution plt.legend( ["Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )".format(mu, sigma)], loc="best" ) plt.ylabel("Frequency") plt.title("Global_Sales distribution") # Get also the QQ-plot fig = plt.figure() res = stats.probplot(data["Global_Sales"], plot=plt) plt.show() # The results illustrate that the distribution graph of Global_Sales is not normally distributed at all, it is aligned far to the left. Meaning that further investigation is required before splitting the data into training and testing set. # To avoid this type of problem, implement the feature correlations so that the variables do not suffer from autocorrelation. This can be an issue in linear regression if need to find intercepts. # Now, we need to plot correlation heatmap with the aid of seaborn. str_list = [] # empty list to contain columns with strings (words) for colname, colvalue in data.iteritems(): if type(colvalue[2]) == str: str_list.append(colname) # Get to the numeric columns by inversion num_list = data.columns.difference(str_list) # Create Dataframe containing only numerical features data_num = data[num_list] f, ax = plt.subplots(figsize=(14, 11)) plt.title("Pearson Correlation of Video Game Numerical Features") # Draw the heatmap using seaborn sns.heatmap( data_num.astype(float).corr(), linewidths=0.25, vmax=1.0, square=True, cmap="cubehelix_r", linecolor="k", annot=True, ) # Now, the purpose of creating this heatmap is improve the confidence of choosing which variable can be selected for correlation. The independent variables mentioned in this heatmap are not highly correlated, *except for the sales number which relates to each other.* Therefore, sales are the factor of success in video game publishing, since if sales are well in a region, then the other region will soon follow. This means that the confidence of using Global_Sales variable as the measurement of correlation is highly recommended. # # Steps to remove what variables are not required and keeping the crucial variables that relates to Global_Sales # **most importantly is to remove values with N/A or none data inside** # Afterwards, is to derive features through feature engineering data_na = (data.isnull().sum() / len(data)) * 100 data_na = data_na.drop(data_na[data_na == 0].index).sort_values(ascending=False)[:30] missing_data = pd.DataFrame({"Missing Ratio": data_na}) missing_data.head(16) # There are lots of missing data with critic_score, which accounts for 51.33 % ratio of data considered as N/A, User_Score and Critic_Score are more or less the same, which has great confidence as the independent vaiable, however there are many missing data, therefore it cannot easily be filled with median values. # The users needs to check other variables that can account into factor of game sales, preferably the video game consoles then choose which consoles are the famous or highly rated in this data (common occurring) to cover the missing ratio of independent variables changed. print(pd.value_counts(data["Platform"])) # In this century, it is best to find the relevant consoles at the time, values above 140 is best considered, which ranges from Wii U until PS4/XboxOne as the new generation consoles. # This prevents the minimal data issue by keeping relevant consoles. 
# use | as OR function in Python data = data[ (data["Platform"] == "PS3") | (data["Platform"] == "PS4") | (data["Platform"] == "X360") | (data["Platform"] == "XOne") | (data["Platform"] == "Wii") | (data["Platform"] == "WiiU") | (data["Platform"] == "PC") ] # Let's double check the value counts to be sure print(pd.value_counts(data["Platform"])) # Let's see the shape of the data again print(data.shape) # Lets see the missing ratios again data_na = (data.isnull().sum() / len(data)) * 100 data_na = data_na.drop(data_na[data_na == 0].index).sort_values(ascending=False)[:30] missing_data = pd.DataFrame({"Missing Ratio": data_na}) missing_data.head(16) # 38% missing data points is still too large to implement median, therefore, drop all rows that have N/A content in Critic_Score column data = data.dropna(subset=["Critic_Score"]) # Let's see the shape of the data again print(data.shape) # Lets see the missing ratios again data_na = (data.isnull().sum() / len(data)) * 100 data_na = data_na.drop(data_na[data_na == 0].index).sort_values(ascending=False)[:30] missing_data = pd.DataFrame({"Missing Ratio": data_na}) missing_data.head(16) # Dealing further with none values # Publisher, fill N/A with mode: data["Publisher"] = data["Publisher"].fillna(data["Publisher"].mode()[0]) # Developer fill N/A with mode data["Developer"] = data["Developer"].fillna(data["Developer"].mode()[0]) # Rating fill N/A with mode data["Rating"] = data["Rating"].fillna(data["Rating"].mode()[0]) # Release year with median data["Year_of_Release"] = data["Year_of_Release"].fillna( data["Year_of_Release"].median() ) # User score and User Count with median data["User_Score"] = data["User_Score"].replace("tbd", None) data["User_Score"] = data["User_Score"].fillna(data["User_Score"].median()) data["User_Count"] = data["User_Count"].fillna(data["User_Count"].median()) # check for missing ratios data_na = (data.isnull().sum() / len(data)) * 100 data_na = data_na.drop(data_na[data_na == 0].index).sort_values(ascending=False)[:30] missing_data = pd.DataFrame({"Missing Ratio": data_na}) missing_data.head(16) # Create dummies for the variables aforementioned: print(data.shape) # pre-dummies data = pd.get_dummies(data=data, columns=["Platform", "Genre", "Rating"]) print(data.shape) # post-dummies data.head # # Important step: At this point, when all of the data is held and before splitting it into training and testing data # **Ensure that ONLY required data is needed to be trained and tested, which the variables of User_Score, Release Year, Critic Score, User Count, Platform are taken into factor of measurement** print(data.columns) # easy to copy-paste the values to rearrange from here X = data[ [ "Year_of_Release", "Critic_Score", "Critic_Count", "User_Score", "User_Count", "Platform_PC", "Platform_PS3", "Platform_PS4", "Platform_Wii", "Platform_WiiU", "Platform_X360", "Platform_XOne", "Genre_Action", "Genre_Adventure", "Genre_Fighting", "Genre_Misc", "Genre_Platform", "Genre_Puzzle", "Genre_Racing", "Genre_Role-Playing", "Genre_Shooter", "Genre_Simulation", "Genre_Sports", "Genre_Strategy", "Rating_E", "Rating_E10+", "Rating_M", "Rating_RP", "Rating_T", ] ] Y = data[["Global_Sales"]] # Double checking the shape print(X.shape) print(Y.shape) from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=42) # Let's check the shape of the split data as a precaution print("X_train shape: {}".format(X_train.shape)) print("Y_train shape: {}".format(Y_train.shape)) print("X_test 
shape: {}".format(X_test.shape)) print("Y_test shape: {}".format(Y_test.shape)) # Now, before fitting the models in final, scale the data first by using log transformation of log(1+x), this imports the function from numpy that is called log1p Y_train = np.log1p(Y_train) Y_test = np.log1p(Y_test) # Draw the new distribution Y_log_transformed = np.log1p( data["Global_Sales"] ) # For comparison to earlier, here's the whole Y transformed sns.distplot(Y_log_transformed, fit=norm) # Get the fitted parameters used by the function (mu, sigma) = norm.fit(Y_log_transformed) print("\n mu = {:.2f} and sigma = {:.2f}\n".format(mu, sigma)) # plot the distribution plt.legend( ["Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )".format(mu, sigma)], loc="best" ) plt.ylabel("Frequency") plt.title("Global_Sales distribution") # Draw the QQ-plot fig = plt.figure() res = stats.probplot(Y_log_transformed, plot=plt) plt.show() # Now to fit the data into the models itself. We will use the X_train and scale the variable with X_Test using the MinMax Scaler from Scikit to fit all the indenpent variables (changing variables) in similar ranges. from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() scaler.fit(X_train) X_train_scaled = scaler.transform(X_train) X_test_scaled = scaler.transform(X_test) # # Regression Model: Code # 1. Support Vector Regressor # 2. Random Forest # begin making the model by making parameter grids param_grid_lr = [{}] # ----SVR----# param_grid_svr = [ {"C": [0.01, 0.1, 1, 10], "gamma": [0.0001, 0.001, 0.01, 0.1, 1], "kernel": ["rbf"]} ] # ----Random Forest----# param_grid_rf = [ { "n_estimators": [3, 10, 30, 50, 70], "max_features": [2, 4, 6, 8, 10, 12], "max_depth": [2, 3, 5, 7, 9], } ] # After creating the parameters, implement the models sequentially and assess which model is best to forecast video game sales # To assess the models, use **RMSE (Root Mean Squared Error)**; t*he standard deviation of residuals or known as prediction errors, basically a way to measure the differneces between values and to measure the error of a given model when it is used to predict quantitative data.* # RMSE is a judgement factor of how accurately the model can predict the outcome of the video game sales response. # By using RMSE, it will tell the error in actual sales unit. # Note: exponential transformation is requied on RMSE scores, if the RMSE was in logarithmic values - otherwise it remains on log(Global_Sales) # Use cross validation: from sklearn.linear_model import LinearRegression from sklearn.model_selection import GridSearchCV grid_search_lr = GridSearchCV( LinearRegression(), param_grid_lr, scoring="neg_mean_squared_error", cv=5 ) grid_search_lr.fit(X_train, Y_train) print("Best parameters: {}".format(grid_search_lr.best_params_)) lr_best_cross_val_score = np.sqrt(-grid_search_lr.best_score_) print("Best cross-validation score: {:.2f}".format(np.expm1(lr_best_cross_val_score))) lr_score = np.sqrt(-grid_search_lr.score(X_test, Y_test)) print("Test set score: {:.2f}".format(np.expm1(lr_score))) # # IMPLEMENTATION OF MODELS # INTRODUCTION, DEFINITIONS, RESULT, SCORE AND EVALUATION # SUPPORT VECTOR REGRESSION: # What is Support Vector Regression? # Support Vector Regression (SVR) is a type of regression algorithm that is based on the concept of *Support Vector Machines (SVM) to determine the outcome of relationships between dependent and independent variables*. 
It is one of the machine learning models that is utilized to predict continous variables such as stock prices, housing prices and etc based on input variables or features. # The main aspect of SVR is to *identify a hyperplane that best fits the training data so that the error between predicted values and actual values is reduced*. So it tries its best to fit in the correct position of hyperplane to separate the data by the largest margin and uses it to predict the value of the dependent variable for the new data. # This is unique compared to MSE often adopted in ordinary linear regression models to minimize the distance between predicted and actual value according to users' configuration. # The SVR hyperplane is chosen to have a maximum margin; *maximum distance between hyperplane and nearest data points on each side - this is used to segregate the data into 2 classes*: one that lies above and other that lies below the hyperplane. # Goal:** minimize prediction errors and maximize margins** # **SVR Formula**: # *The basic formula for Support Vector Regression (SVR) is similar to that of linear regression, but with additional parameters and constraints to find the optimal hyperplane:* # y = w^T * x + b # where: # y is the predicted output value # x is the input feature vector # w is the weight vector that represents the coefficients of the hyperplane # b is the bias term or intercept # To find the optimal hyperplane, SVR introduces two additional parameters: # C: a regularization parameter that controls the tradeoff between the model complexity and the error on the training data. # ε: a margin parameter that defines the acceptable range of error from the true output value. # The optimization problem for SVR is to find the weight vector w and the bias term b that minimize the following objective function subject to the constraints: # minimize (1/2) * ||w||^2 + C * Σ(max(0, |y - y_hat| - ε)) # subject to: # y_hat = w^T * x_i + b, for all i = 1,2,..,n # |y - y_hat| <= ε, for all i = 1,2,..,n # where: # n is the number of training examples # y is the true output value # y_hat is the predicted output value # ||w||^2 is the L2 norm of the weight vector w # Σ(max(0, |y - y_hat| - ε)) is the sum of the errors above the margin ε # The objective function is a tradeoff between the complexity of the model (controlled by ||w||^2) and the error on the training data (controlled by the sum of errors). The constraints ensure that the predicted output values are within the margin ε from the true output values. # modeling the relationship of 2 variables; independent and dependent variables, the SVR uses kernel function to transform input data into feature space that has higher dimension. Kernel calculates the similarities of the data pairs that will derive its scalar to represent similarity. Then afterwards, SVR creates the new feature space are then used to predict value of the dependent variable for new data. # **Given in this model, this Support Vector Regressor model uses** ***RBF - Radial Basis Function Kernel*** # A kernel function used to transform input data into higher dimensional feature space to implement a linear regression model to fit the data. So what the RBF kernel does is that it measures the similarity between two examples in the input space using a Gaussian Function in an infinite feature space. 
# K(x, x') = exp(-gamma ||x - x'||^2) # where x and x' are the input examples, ||x - x'||^2 is the squared Euclidean distance between the examples, and gamma is a hyperparameter that determines the width of the Gaussian function. # Advantages and Disadvantages of RBF: # Adv: # 1. RBF is able to capture complex non linear relationship in data which can be mapped into higher dimensional space then a linear hyperplane can be drawn to segregate the data # 2. Computationally efficient; does not need to compute the full kernel matrix - only the pairwise distances between support vectors and new examples # 3. Able to handle complex data and can be tuned with gamma parameter # 4. Only has one hyperparameter: width of Gaussian kernel, so it is simple to adjust compared to other kernels # Disadv: # 1. RBF kernel is sensitive to choice of hyperparameter and performance is highly dependent on the value of the width of Gaussian Kernel. It requires complex calculation and manual selection of finding the best or optimal value for hyperparamter which requires expertise # 2. RBF kernel is prone to overfitting if the width of Gaussian kernel is tiny / underfitting if width is too large. Balancing can be difficult for differentiating complexity and generalization # 3. Scaling sensitivity of input features since it requires Euclidean distance between examples. As shown in this model, it has been scaled - if it is not scaled in other circumstances, some features might dominate the distance measure which can affect the model's performance # SVR CODE from sklearn.svm import SVR grid_search_svr = GridSearchCV( SVR(), param_grid_svr, cv=5, scoring="neg_mean_squared_error" ) grid_search_svr.fit(X_train, Y_train) print("Best parameters: {}".format(grid_search_svr.best_params_)) svr_best_cross_val_score = np.sqrt(-grid_search_svr.best_score_) print("Best cross-validation score: {:.2f}".format(np.expm1(svr_best_cross_val_score))) svr_score = np.sqrt(-grid_search_svr.score(X_test, Y_test)) print("Test set score: {:.2f}".format(np.expm1(svr_score))) # Random Forest Regression: What is Random Forest Regression? # Random forest regression is a machine learning algorithm used in regression problems. It is one of the variant from Random Forest model that is used classification problems. Essentially, it is an approach of regression based on collection of decision trees, hence the name random forest. # Random forest regressor generates decision trees on randomly selected subsets of input data and outputs the mean or median prediction of individual trees as final prediction. # ===Keypoint: Builds several decision trees and combine prediction results of each of the decision trees to provide accurate predictions... # Procedural steps of how the algorithm/model work: # 1. Random subsets of input data are selected with replacement, this technique is known as bagging - each subset is used to train decision tree model # 2. At each node of decision tree, random subset of input features are selected, best feature is chosen to split the node # 3. Decision tree will be grown until stopping criterion is reached, maximum depth of minimum number of samples are required to split the node # 4. Make a forest of decision trees # 5. Make prediction for new example, Random Forest Regressor algorithm takes average or median of predictions of decision trees generated in the forest # RF Regressor Formula: # The way how the model/algorithm work is that: # 1. 
Initiate the set of decision trees are trained on different subsets of the input data using bagging and feature randomization # 2. In new input X, RF regressor predicts target variable Y by aggregating the predictions of all individual decision trees = can be done taking average or median of predicted values # Given a training set {(X1, Y1), (X2, Y2), ..., (Xn, Yn)} where Xi is the input example with m features and Yi is the target variable, the Random Forest Regressor algorithm generates T decision trees, each trained on a random subset of the input data and a random subset of the input features. # Let the predicted value of the ith decision tree be denoted as Yi^t(X), where t is the index of the decision tree. # Then, the predicted value of the Random Forest Regressor algorithm for a new input example X is given by: # Y = (1/T) * ∑(i=1 to T) Yi^t(X) # Y = final predicted value # sum is taken over all T decision trees # RF regressor uses weighted average or median of predicted values; weights are proportional to accuracy of each decision tree. # Advantage and Disadvantages of Random Forest Regressor: # Adv: # 1. Random Forest can handle both categorical and continous input features # 2. Can handle missing/noisy data, also performs well with datasets that has high dimensionality. Handles data that does not have normal distribution as well # 3. Can identiy the most significant independent variables that affects the variablity of dependent variable # 4. Less prone to overfitting compared to normal individual decision trees, since it adopts averaging or using median based aggregation of predictions with the help of multiple trees # 5. Provides importance scores for input features that allows identification of most significant input features in prediction # Disadv: # 1. Random Forest Regressor are slower to train and making predictions can be hindered compared to simpler regression algorithms (higher computational time complexity) # 2. More difficult to interpret than linear regression model or individual decision trees; does not provide clear relationship explanation of independent and dependent variables overloads the system resource usage which decreases performance that requires the user to tune it carefully # 3. Hyperparameters: number of trees, depth of trees and number of features used at each node severely # 4. 
Random Forest Regressor does not perform well on datasets that has imbalanced class distributions, since the majority will dominate the tree building process that might lead to biased predictions result from sklearn.ensemble import RandomForestRegressor grid_search_rf = GridSearchCV( RandomForestRegressor(), param_grid_rf, cv=5, scoring="neg_mean_squared_error" ) grid_search_rf.fit(X_train, Y_train) print("Best parameters: {}".format(grid_search_rf.best_params_)) rf_best_cross_val_score = np.sqrt(-grid_search_rf.best_score_) print("Best cross-validation score: {:.2f}".format(np.expm1(rf_best_cross_val_score))) rf_score = np.sqrt(-grid_search_rf.score(X_test, Y_test)) print("Test set score: {:.2f}".format(np.expm1(rf_score))) # # Conclusion of the 2 models used: # Plotting feature importance of what determines a good video game sales forecast: # from sklearn.svm import SVR # =======Depicting feature importance using Support Vector Regressor======= feature_importance = grid_search_svr.best_estimator_.feature_importances_ # make importances relative to max importance feature_importance = 100.0 * (feature_importance / feature_importance.max()) sorted_idx = np.argsort(feature_importance) pos = np.arange(sorted_idx.shape[0]) + 0.5 plt.figure(figsize=(20, 10)) plt.subplot(1, 2, 2) plt.barh(pos, feature_importance[sorted_idx], align="center") plt.yticks( pos, X_train.columns.values[sorted_idx] ) # Not 100 % sure the feature names match the importances correctly... plt.xlabel("Relative Importance") plt.title("Variable Importance") plt.show() # Apparently, SVR does not support feature importances unlike Random Forest to Visualize the factors of making a good video game sales prediction # =======*** Depicting feature importance using Random Forest Regressor ***======= feature_importance = grid_search_rf.best_estimator_.feature_importances_ # make importances relative to max importance feature_importance = 100.0 * (feature_importance / feature_importance.max()) sorted_idx = np.argsort(feature_importance) pos = np.arange(sorted_idx.shape[0]) + 0.5 plt.figure(figsize=(20, 10)) plt.subplot(1, 2, 2) plt.barh(pos, feature_importance[sorted_idx], align="center") plt.yticks( pos, X_train.columns.values[sorted_idx] ) # Not 100 % sure the feature names match the importances correctly... plt.xlabel("Relative Importance") plt.title("Variable Importance") plt.show()
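# As noted above, an RBF-kernel SVR has no feature_importances_ attribute. One model-agnostic
# alternative is permutation importance, which measures how much the test score degrades when a
# single column is shuffled. A hedged sketch using scikit-learn's inspection module, assuming the
# fitted grid_search_svr and the X_test / Y_test split from above are still in scope:
from sklearn.inspection import permutation_importance

perm = permutation_importance(
    grid_search_svr.best_estimator_,
    X_test,  # same (unscaled) frame the grid search above was fitted on
    Y_test.values.ravel(),
    n_repeats=10,
    random_state=42,
    scoring="neg_mean_squared_error",
)
perm_order = perm.importances_mean.argsort()
plt.figure(figsize=(12, 10))
plt.barh(X_test.columns[perm_order], perm.importances_mean[perm_order])
plt.xlabel("Mean drop in score when the feature is shuffled")
plt.title("Permutation importance (SVR)")
plt.show()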
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt

df = pd.read_csv("/kaggle/input/avocado-prices/avocado.csv", index_col=None)
del df["Unnamed: 0"]
df.head()
df.shape
df.info()
df["Date"] = pd.to_datetime(df["Date"])
df.head()
df["region"].value_counts()
# astype() is not an in-place operation, so the result has to be assigned back
df["region"] = df["region"].astype("category")
df["type"].value_counts()
df["type"] = df["type"].astype("category")
df.info()
df.head(3)
df.describe()
type(df["type"])
print(df.head())
print(df["AveragePrice"].mean())
print(df["AveragePrice"].median())
print(df["AveragePrice"].mode())
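# Beyond the overall mean/median/mode, the average price can also be summarised over time and by
# type. A small sketch, assuming the df loaded above with its Date, type and AveragePrice columns:
# Mean price per type (conventional vs organic)
print(df.groupby("type")["AveragePrice"].mean())

# Monthly mean price: set Date as the index and resample to month-end
monthly = df.set_index("Date")["AveragePrice"].resample("M").mean()
monthly.plot(title="Monthly mean avocado price")
plt.show()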
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pandas_profiling test1 = pd.read_csv("../input/test1.csv") train1 = pd.read_csv("../input/train1.csv") train1.info() test1.info() train1.describe() test1.describe() train1.drop(train1.columns[[0]], axis=1, inplace=True) test1.drop(test1.columns[[0]], axis=1, inplace=True) train1[train1.isnull().any(axis=1)].count() test1[test1.isnull().any(axis=1)].count() test1.isna().sum() train1.isna().sum() train1["PROD_CD"].unique() train1.info() train1["PROD_CD"].value_counts() train1["SLSMAN_CD"].value_counts() train1["PLAN_MONTH"].value_counts() train1["PLAN_YEAR"].value_counts() train1["TARGET_IN_EA"].value_counts() train1["ACH_IN_EA"].value_counts() import re p = re.compile(r"\D") train1["TARGET_IN_EA"] = [p.sub("", x) for x in train1["TARGET_IN_EA"]] train1["ACH_IN_EA"] = [p.sub("", x) for x in train1["ACH_IN_EA"]] test1["TARGET_IN_EA"] = [p.sub("", x) for x in test1["TARGET_IN_EA"]] train1.head() train1["TARGET_IN_EA"] = pd.to_numeric(train1["TARGET_IN_EA"]) train1["ACH_IN_EA"] = pd.to_numeric(train1["ACH_IN_EA"]) test1["TARGET_IN_EA"] = pd.to_numeric(test1["TARGET_IN_EA"]) train1.info() test1.info() pandas_profiling.ProfileReport(train1) plt.scatter(x="TARGET_IN_EA", y="ACH_IN_EA", data=train1) sns.pairplot(train1.iloc[:, :]) sns.pairplot(test1.iloc[:, :]) train1.boxplot() test1.boxplot() string_column = ["PROD_CD", "SLSMAN_CD"] from sklearn import preprocessing number = preprocessing.LabelEncoder() for i in string_column: train1[i] = number.fit_transform(train1[i]) test1[i] = number.fit_transform(test1[i]) colnames = list(train1.columns) # colnames=list(test1.columns) train1.info() test1.info() train1.head(10) test1.head(10) plt.hist("ACH_IN_EA", data=train1) train1["ACH_IN_EA"].value_counts()[:20].plot(kind="barh") from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier train, test = train_test_split(train1, test_size=0.2, random_state=0) x = colnames[:5] y = colnames[5] # model=RandomForestClassifier(n_jobs=3,oob_score=True,n_estimators=100,criterion='entropy') # model.fit(train[x],train[y]) ##pred=model.predict(test[x]) # pd.Series(pred).value_counts() # np.mean(pd.Series(train1.ACH_IN_EA).reset_index(drop=True)==pd.Series(model.predict(train[x]))) ## ##model=RandomForestClassifier(n_jobs=3,oob_score=True,n_estimators=100,criterion='entropy') # model.fit(train1,test1) # pred=model.predict(test1) # pd.Series(pred).value_counts() # np.mean(pd.Series(train1.ACH_IN_EA).reset_index(drop=True)==pd.Series(model.predict(train1))) # np.mean(pred==test.ACH_IN_EA)
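# A note on the label encoding above (sketch only, not the original author's code):
# fitting a separate LabelEncoder per frame can map the same PROD_CD / SLSMAN_CD value
# to different integers in train1 and test1. A common fix is to fit one encoder per
# column on the union of both frames and transform each frame with that single encoder.
from sklearn import preprocessing
import pandas as pd

def encode_consistently(train_df, test_df, columns):
    """Fit one LabelEncoder per column on train+test values, then transform both frames."""
    for col in columns:
        enc = preprocessing.LabelEncoder()
        enc.fit(pd.concat([train_df[col], test_df[col]], axis=0).astype(str))
        train_df[col] = enc.transform(train_df[col].astype(str))
        test_df[col] = enc.transform(test_df[col].astype(str))
    return train_df, test_df

# Usage (assumes train1 / test1 still hold the raw string codes at this point):
# train1, test1 = encode_consistently(train1, test1, ["PROD_CD", "SLSMAN_CD"])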
# # Introduction # This notebook is a development of the final exercise from the ‘[Intro to Machine Learning](https://www.kaggle.com/learn/intro-to-machine-learning)’ micro-course. # import math from operator import itemgetter from matplotlib import pyplot as plt import numpy as np import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error from sklearn.model_selection import KFold from sklearn.neighbors import KNeighborsRegressor from sklearn.preprocessing import OneHotEncoder # # Data # Meanings of all features are described [here](https://www.kaggle.com/c/home-data-for-ml-course/data). train_df = pd.read_csv("../input/train.csv", index_col="Id") train_df X_train = train_df.drop(columns="SalePrice") y_train = train_df["SalePrice"] X_test = pd.read_csv("../input/test.csv", index_col="Id") X_test X_all = pd.concat([X_train, X_test]) # # Numeric features numer_features = set(X_train.select_dtypes(exclude=["category", "object"]).columns) sorted(numer_features) X_all[numer_features].hist( bins=50, log=True, figsize=(15, 40), layout=(math.ceil(len(numer_features) / 3), 3) ) # Check if there are missing values: nan_counts = X_all[numer_features].isna().sum() nan_counts[nan_counts > 0].sort_index() # Most of these columns seem to refer to properties of a non-existent garage or basement. Set all these areas and numbers of bathrooms to zeros (note the distributions above already have peaks at zeros): for feature in [ "BsmtFinSF1", "BsmtFinSF2", "BsmtFullBath", "BsmtHalfBath", "BsmtUnfSF", "GarageArea", "GarageCars", "MasVnrArea", "TotalBsmtSF", ]: for X in [X_train, X_test]: X.loc[X[feature].isna(), feature] = 0.0 # Missing built year for garage does not necessarily imply that the property doesn't have a garage: X_all[~X_all["GarageYrBlt"].isna()]["GarageArea"].hist() # Assume that the garage was built in the same year as the house then. Also change the year for a time-travelling garage from the XXIII century, which can be seen in the distributions above. for X in [X_train, X_test]: selection = X["GarageYrBlt"].isna() | (X["GarageYrBlt"] > 2015) X.loc[selection, "GarageYrBlt"] = X.loc[selection, "YearBuilt"] # The last column with missing values is `LotFrontage`. It's not clear what is the reason for having missing value there. Could they indicate that the house is situated at a substantial distance from the road? Then this would probably affect the average price, but it doesn't seem to: y_train.groupby(X_train["LotFrontage"].isna()).mean() # Try to predict the missing values from other features. The single most relevant one is probably `LotArea`. The first plot below compares the distributions of the area for properties with known and missing lengths of the frontage. Although in the latter case the distribution has a heavier tail, the difference is not drastic. The second plot illustrates the dependence between `LotFrontage` and square root of `LotArea`. There is a somewhat linear trend with a large variance. 
selection = ~X_all["LotFrontage"].isna() fig = plt.figure(figsize=(12.8, 4.8)) axes = fig.add_subplot(1, 2, 1) axes.hist( [X_all.loc[selection, "LotArea"], X_all.loc[~selection, "LotArea"]], label=["Present", "Missing"], bins=30, density=True, ) axes.set_yscale("log") axes.set_xlabel("LotArea") axes.legend() axes = fig.add_subplot(1, 2, 2) axes.scatter( X_all.loc[selection, "LotArea"].apply(np.sqrt), X_all.loc[selection, "LotFrontage"], s=1, ) axes.set_xlabel("sqrt(LotArea)") axes.set_ylabel("LotFrontage") # I don't expect `LotFrontage` to bring a lot of information on top of the area, property type, neighbourhood, and so on. Not to spend much time on it, I'm going to impute it with a simple kNN regressor. It captures the linear scaling in the bulk of the distribution but at the same time does not extrapolate it to extreme values, which is probably a sane model. For a more accurate treatment see [this notebook](https://www.kaggle.com/ogakulov/lotfrontage-fill-in-missing-values-house-prices). sel = ~X_train["LotFrontage"].isna() fX = X_train.loc[sel, "LotArea"].apply(np.sqrt).to_numpy().reshape(-1, 1) fy = X_train.loc[sel, "LotFrontage"] frontage_model = KNeighborsRegressor(50) frontage_model.fit(fX, fy) fig = plt.figure() axes = fig.add_subplot(111) axes.scatter(fX, fy, s=1) x = np.linspace(0.0, 300.0) axes.plot(x, frontage_model.predict(x.reshape(-1, 1)), c="C1") axes.set_xlim(x[0], x[-1]) axes.set_xlabel("sqrt(LotArea)") axes.set_ylabel("LotFrontage") fig for X in [X_train, X_test]: sel = X["LotFrontage"].isna() X.loc[sel, "LotFrontage"] = frontage_model.predict( X.loc[sel, "LotArea"].apply(np.sqrt).to_numpy().reshape(-1, 1) ) # Lastly, column `MSSubClass` actually holds categorical data. Convert it accordingly. for X in [X_train, X_test, X_all]: X["MSSubClass"] = X["MSSubClass"].astype("category") numer_features.remove("MSSubClass") # ## Categorical features cat_features = set(X_train.select_dtypes(include=["category", "object"]).columns) cat_features for feature in sorted(cat_features): print(feature, set(X_all[feature].unique())) # There is a number of ordinal features, which can be converted into numbers in a straightforward manner. Examples are the various quality measures, such as `ExterQual` and `KitchenQual`. Although it's hard to guess what does ‘TA’ mean. Let's see how the quality label correlate with the price for a few features: y_train.groupby("BsmtCond").mean().sort_values() y_train.groupby("HeatingQC").mean().sort_values() # It seems ‘TA’ means average (‘typical’?) quality. Let's convert all quality features to numbers. Map NaNs to the neutral value. converted_cat_features = set() def convert_features(features, mapping): if isinstance(features, str): features = [features] for feature in features: for X in [X_train, X_test]: X[feature] = X[feature].map(mapping) converted_cat_features.update(features) quality_features = set(X_train.columns[X_train.isin(["TA", "Ex"]).any()]) quality_transform = {"Po": -2, "Fa": -1, "TA": 0, "Gd": 1, "Ex": 2, np.nan: 0} convert_features(quality_features, quality_transform) # Proceed with other straightforward cases: convert_features( "Alley", {"Grvl": 1, "Pave": 2, np.nan: 0} ) # Assume NaN means no alley convert_features("CentralAir", {"N": 0, "Y": 1}) convert_features( "GarageFinish", {"Unf": 0, "RFn": 1, "Fin": 2, np.nan: 0} ) # 'RFn' for 'rough finish'? convert_features("PavedDrive", {"N": 0, "P": 1, "Y": 2}) # 'P' for 'partial'? 
convert_features("Street", {"Grvl": 0, "Pave": 1}) convert_features( "LandSlope", {"Gtl": 0, "Mod": 1, "Sev": 2} ) # Gentle, moderate, severe convert_features("BsmtExposure", {"No": 0, "Mn": 1, "Av": 2, "Gd": 3, np.nan: 0}) # Drop features for which the most frequent value is found in more than 90% of instances. drop_features = set() for feature in sorted(cat_features): counts = X_train[feature].value_counts(dropna=False).sort_values(ascending=False) if counts.iloc[0] / len(X_train) > 0.9: drop_features.add(feature) print(drop_features) for X in [X_train, X_test]: X.drop(columns=drop_features, inplace=True) # Fill all NaNs in the remaning categorical columns: for X in [X_train, X_test, X_all]: for feature in [ "BsmtFinType1", "BsmtFinType2", "Exterior1st", "Exterior2nd", "Fence", "GarageType", "MSZoning", "MasVnrType", "SaleType", ]: X[feature].fillna("None", inplace=True) # Check remaining categorical features: for feature in sorted(cat_features - converted_cat_features - drop_features): print(feature, sorted(list(X_all[feature].unique()))) # I will mostly use target encoding for them. Here I apply one-hot encoding to `BldgType` just to have an example of it: one_hot_features = ["BldgType"] one_hot_encoder = OneHotEncoder(handle_unknown="ignore", sparse=False) one_hot_encoder.fit(X_train[one_hot_features]) def one_hot_encode(X): t = pd.DataFrame(one_hot_encoder.transform(X[one_hot_features])) t.index = X.index t.columns = one_hot_encoder.get_feature_names(one_hot_features) return X.drop(columns=one_hot_features).join(t) X_train = one_hot_encode(X_train) X_test = one_hot_encode(X_test) one_hot_features = set(one_hot_features) # To the remaining categorical features, I apply the [target encoding](https://maxhalford.github.io/blog/target-encoding-done-the-right-way/), implemented as a combination of the versions with cross-validation and additive smoothing. The training set is split into $k$ folds, and all examples of category $c$ within the fold $i$ are assigned the value # $$t_c = \frac{n_c \mu_c + n_\text{reg} \mu}{n_c + n_\text{reg}},$$ # where $n_c$ and $\mu_c$ are the number of examples of category $c$ in the remaining $k - 1$ folds and the mean value of the target computed over them, $\mu$ is the global mean value of the target computed with all examples in the training set regardless of their categories, and $n_\text{reg}$ is a regularization parameter that pushes $t_c$ closer to $\mu$ for underpopulated categories. On the test set, $t_c$ is set to # $$t_c^\text{test} = \frac{\frac{k - 1}{k} n_c \mu_c + n_\text{reg} \mu}{\frac{k - 1}{k} n_c + n_\text{reg}},$$ # where $n_c$ and $\mu_c$ are now computed over the whole training set, and the factor $(k - 1) / k$ sets the same balance between $\mu_c$ and $\mu$ as used in the training. # Since this is a supervised encoding, I only define here the transformer class and will apply it in the training pipeline. This way the potential overfitting due to the target encoding won't affect the cross-validation estimate of the error of the regression model. 
class TargetEncoder(BaseEstimator, TransformerMixin): def __init__(self, columns, k_folds=5, n_reg=10, random_state=None): self.columns = columns self.k_folds = k_folds self.n_reg = n_reg self.random_state = random_state self._test_transform = {} self._global_mean = None def fit(self, X, y=None): raise NotImplemented("Only fit_transform is supported.") def fit_transform(self, X, y): """Fit and apply encoding.""" X_trans = X.copy() self._test_transform = {} self._global_mean = y.mean() for column in self.columns: X_trans[ column ] = 0.0 # Needed to prevent type error when assigning to a categorical column in Pandas self._test_transform[column] = self._build_replace_map( y.groupby(X[column]).agg(["count", "mean"]), test=True ) kfold = KFold( n_splits=self.k_folds, shuffle=True, random_state=self.random_state ) for source_indices, update_indices in kfold.split(X): replace_map = self._build_replace_map( y.iloc[source_indices] .groupby(X[column].iloc[source_indices]) .agg(["count", "mean"]) ) X_trans.iloc[update_indices, X_trans.columns.get_loc(column)] = ( X[column] .iloc[update_indices] .apply(lambda cat: replace_map.get(cat, self._global_mean)) ) return X_trans def transform(self, X, y=None): """Apply encoding on test set.""" X_trans = X.copy() for column in self.columns: X_trans[column] = X[column].apply( lambda cat: self._test_transform[column].get(cat, self._global_mean) ) return X_trans def _build_replace_map(self, stats, test=False): """Build replacement map for a single feature. Arguments: stats: DataFrame whose index is categories of this feature and columns are the number of examples and mean value of the target in each category. test: Boolean distinguishing training and test sets. Return value: Map from category to its numeric representation. """ replace_map = {} for category in stats.index: if isinstance(category, int): # Work-around for this bug: https://github.com/pandas-dev/pandas/issues/17569 n_c, mu_c = stats.loc[pd.CategoricalIndex([category])].iloc[0] else: n_c, mu_c = stats.loc[category] if test: n_c *= (self.k_folds - 1) / self.k_folds replace_map[category] = (n_c * mu_c + self.n_reg * self._global_mean) / ( n_c + self.n_reg ) return replace_map target_encoder_features = ( cat_features - converted_cat_features - drop_features - one_hot_features ) target_encoder = TargetEncoder(target_encoder_features, random_state=7210) # # Regression # In contrast to what is claimed in the [description](https://www.kaggle.com/c/home-data-for-ml-course/overview/evaluation) of the competition, the scoring is based on the mean absolute error (mentioned [here](https://www.kaggle.com/c/home-data-for-ml-course/discussion/105838) and confirmed by comparing the score computed locally against the one reported in the leaderboard). # Cannot use the built-in pipeline because `TargetEncoder` does not provide method `fit` and the regressor used does not provide `fit_predict`. 
def cross_validate(regressor, cv=5, drop_features=[]): scores = [] feature_importances = np.zeros(X_train.shape[1] - len(drop_features)) kfold = KFold(cv, shuffle=True, random_state=3404) for train_indices, test_indices in kfold.split(X_train): X = X_train.iloc[train_indices] y = y_train.iloc[train_indices] X = target_encoder.fit_transform(X, y) regressor.fit(X.drop(columns=drop_features), y) X = X_train.iloc[test_indices] y = y_train.iloc[test_indices] X = target_encoder.transform(X, y) y_pred = regressor.predict(X.drop(columns=drop_features)) scores.append(mean_absolute_error(y, y_pred)) feature_importances += regressor.feature_importances_ return np.asarray(scores), feature_importances / cv regressor = RandomForestRegressor(n_estimators=1000, random_state=8266) scores, feature_importances = cross_validate(regressor) print("{} +- {}".format(scores.mean(), scores.std())) feature_importances = list(zip(X_train.columns, feature_importances)) feature_importances.sort(key=itemgetter(1), reverse=True) for i, (feature, importance) in enumerate(feature_importances): print("{:2d} {:17} {:.3f}".format(i, feature, importance)) top_n = 30 drop_features = list(map(itemgetter(0), feature_importances[top_n:])) scores, _ = cross_validate(regressor, drop_features=drop_features) print("{} +- {}".format(scores.mean(), scores.std())) # | Num. features | CV score | # |---|---| # | 10 | 18389 &pm; 571 | # | 20 | 17589 &pm; 344 | # | 30 | 17297 &pm; 236 | # | 40 | 17261 &pm; 257 | # | 50 | 17309 &pm; 198 | # With the full set of features, tried transforming the target as $y \mapsto \tilde y = y^\alpha$ to reduce its spread. This can reduce the mean score a bit, but at the price of increasing the variance. Decided not to apply such a transformation. # | &alpha; | CV score | # |---|---| # | 0.4 | 17338 +- 472 | # | 0.6 | 17212 +- 526 | # | 0.75 | 17156 +- 446 | # | 0.85 | 17215 +- 371 | # | 1 | 17274 +- 193 | # CV scores in different setups: # * **17888 \[*V3\]** Random forest with all numeric features. # * **17486 \[*V5\]** (public LB 15916) Random forest with all numeric features. Categorical features with little diversity dropped, ordinal features encoded as numbers, one-hot encoding applied to nominal features. # * **17274 \[V1\]** (public LB 15588) Target encoder for most of categorical features. # * **17297 \[V2\]** Only keep top-30 features. # # Submission X = target_encoder.fit_transform(X_train, y_train) X = X.drop(columns=drop_features) regressor.fit(X, y_train) train_predictions = regressor.predict(X) print("Train error:", mean_absolute_error(train_predictions, y_train)) X = target_encoder.transform(X_test) X = X.drop(columns=drop_features) test_predictions = regressor.predict(X) output = pd.DataFrame({"Id": X_test.index, "SalePrice": test_predictions}) output.to_csv("submission.csv", index=False)
# # Introduction # The purpose of this kernel is to build a predictive model in domain of anomaly detection using Tennessee Eastman Process Simulation Dataset. # I will use some statistical and neural network approaches. import numpy as np import pandas as pd import pyreadr # # Data preparation # The original dataset consists of 4 RData files with train and test parts for normal and faulty processes descriptions. # Let's convert them and concat in order to get two pandas dataframes for train and test data. train_df = pd.concat( [ pyreadr.read_r( "/kaggle/input/tennessee-eastman-process-simulation-dataset/TEP_FaultFree_Training.RData" )["fault_free_training"], pyreadr.read_r( "/kaggle/input/tennessee-eastman-process-simulation-dataset/TEP_Faulty_Training.RData" )["faulty_training"], ] ) test_df = pd.concat( [ pyreadr.read_r( "/kaggle/input/tennessee-eastman-process-simulation-dataset/TEP_FaultFree_Testing.RData" )["fault_free_testing"], pyreadr.read_r( "/kaggle/input/tennessee-eastman-process-simulation-dataset/TEP_Faulty_Testing.RData" )["faulty_testing"], ] ) train_df.head() train_df.head() test_df.head()
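# A minimal statistical baseline (sketch only, not the author's model): fit PCA on the
# fault-free training runs and score samples by squared reconstruction error, flagging
# large errors as anomalies. It assumes the frames carry the metadata columns
# 'faultNumber', 'simulationRun' and 'sample' (check train_df.columns) and that every
# other column is a process measurement.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

meta_cols = ["faultNumber", "simulationRun", "sample"]
feature_cols = [c for c in train_df.columns if c not in meta_cols]

normal = train_df[train_df["faultNumber"] == 0][feature_cols]

scaler = StandardScaler().fit(normal)
pca = PCA(n_components=0.95).fit(scaler.transform(normal))  # keep 95% of the variance

def anomaly_score(X):
    """Squared reconstruction error per row after projecting onto the PCA subspace."""
    Z = scaler.transform(X)
    Z_hat = pca.inverse_transform(pca.transform(Z))
    return np.sum((Z - Z_hat) ** 2, axis=1)

scores_normal = anomaly_score(normal)
threshold = np.percentile(scores_normal, 99)  # simple empirical threshold
print("99th-percentile threshold on fault-free data:", threshold)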
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # # Loading Data # Here, we'll load the data in data frames of pandas. train_csv = "/kaggle/input/Kannada-MNIST/train.csv" test_csv = "/kaggle/input/Kannada-MNIST/test.csv" val_csv = "/kaggle/input/Kannada-MNIST/Dig-MNIST.csv" train_df = pd.read_csv(train_csv) test_df = pd.read_csv(test_csv) valid_df = pd.read_csv(val_csv) # Just checking out how does the data frame looks valid_df.head() # # Visualizing Data # Here, we're seeing how many samples are present there for each number of classes. To avoid any bias by the network, we'll balance the data by augumentation in case there are no approximately equal number of samples per class import seaborn as sns sns.distplot(train_df["label"], kde=False) # Looks like there are exactly equal number of samples per class. Thus, we don't need any data augumentation to overcome biasing. # # Formatting Data X_train = train_df.drop("label", axis=1).values y_train = train_df["label"].values X_val = valid_df.drop("label", axis=1).values y_val = valid_df["label"].values # ### Reshaping data in (28, 28, 1) dims X_train = X_train.reshape(X_train.shape[0], 28, 28) X_val = X_val.reshape(X_val.shape[0], 28, 28) X_train = np.expand_dims(X_train, axis=3) X_val = np.expand_dims(X_val, axis=3) n_classes = 10 # 0 through 9 # Converting labels to one-hot encoding from tensorflow.keras.utils import to_categorical y_train = to_categorical(y_train) y_val = to_categorical(y_val) # ### Increasing number of samples # Using ImageDataGenerator here. 
# Ref: Link from tensorflow.keras.preprocessing.image import ImageDataGenerator image_data_gen = ImageDataGenerator( rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180) zoom_range=0.1, # Randomly zoom image width_shift_range=0.1, # randomly shift images horizontally (fraction of total width) height_shift_range=0.1, # randomly shift images vertically (fraction of total height) horizontal_flip=False, # randomly flip images vertical_flip=False, ) # randomly flip images image_data_gen.fit(X_train) image_data_gen.fit(X_val) print(X_train.shape) print(y_train.shape) print(X_val.shape) print(y_val.shape) img_shape = (28, 28, 1) # # Model Definition from tensorflow.keras.models import Sequential from tensorflow.keras.layers import ( Dense, Conv2D, MaxPool2D, Flatten, Dropout, BatchNormalization, ) from tensorflow.keras.optimizers import Adam # Model Ref: Link model = Sequential() model.add( Conv2D( filters=128, kernel_size=(3, 3), padding="Same", activation="relu", input_shape=(28, 28, 1), ) ) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(BatchNormalization(momentum=0.15)) model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="Same", activation="relu")) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(BatchNormalization(momentum=0.15)) model.add(Conv2D(filters=32, kernel_size=(3, 3), padding="Same", activation="relu")) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(BatchNormalization(momentum=0.15)) model.add(Flatten()) model.add(Dense(256, activation="relu")) model.add(Dropout(0.4)) model.add(Dense(10, activation="softmax")) model.compile( loss="categorical_crossentropy", optimizer=Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999), metrics=["accuracy"], ) model.summary() # # Training model.fit_generator( image_data_gen.flow(X_train, y_train, batch_size=64), steps_per_epoch=len(X_train) // 32, epochs=10, validation_data=(image_data_gen.flow(X_val, y_val)), ) # # Testing on Test Dataset test_df.head() x_test = test_df.drop("id", axis=1).values.reshape(len(test_df), 28, 28, 1) predictions = model.predict(x_test) submit_df = pd.DataFrame(columns=["id", "label"]) submit_df.index.name = "id" for index, pred in enumerate(predictions): df = pd.DataFrame({"id": [index], "label": [pred.argmax()]}) submit_df = submit_df.append(df) submit_df.head() submit_df.to_csv("./submission.csv", index=False)
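# Building the submission row by row with DataFrame.append is slow, and append is
# deprecated in newer pandas; an equivalent vectorised construction (sketch, reusing the
# predictions and test_df defined above) is:
import numpy as np
import pandas as pd

labels = np.argmax(predictions, axis=1)
submit_df = pd.DataFrame({"id": test_df["id"].values, "label": labels})
submit_df.to_csv("./submission.csv", index=False)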
import warnings warnings.filterwarnings("ignore") # Utils from os import path # Data import numpy as np import pandas as pd # Viz import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns from PIL import Image from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator sns.set(style="darkgrid") print(f"Matplotlib Version : {mpl.__version__}") data_path = "../input/" test = pd.read_csv(data_path + "titanic/test.csv") train = pd.read_csv(data_path + "titanic/train.csv") # df = pd.read_csv(data_path+'imdb.csv',error_bad_lines=False); df = train # ### 1. Viz training dataset df df.columns = [i.lower() for i in df.columns] df.head(3) sns.set(style="white", palette="muted", color_codes=True) f, axes = plt.subplots(3, 2, figsize=(14, 14)) sns.despine(left=True) # hides the border sns.countplot(x="pclass", data=df, hue="sex", ax=axes[0, 0]) sns.boxplot(x="age", hue="sex", data=df, ax=axes[0, 1]) sns.countplot(x="survived", data=df, hue="sex", ax=axes[1, 0]) sns.swarmplot(x="embarked", y="pclass", data=df, ax=axes[1, 1]) sns.swarmplot(x="survived", y="fare", data=df, ax=axes[2, 0]) sns.boxplot(x="embarked", y="survived", data=df, ax=axes[2, 1]) plt.tight_layout() C, Q, S = ( len(df[df["embarked"] == "C"]), len(df[df["embarked"] == "Q"]), len(df[df["embarked"] == "S"]), ) # ### Route exploration import folium iceburg = [41.7666636, -50.2333324] southampton = [50.909698, -1.404351] cherbourg = [49.630001, -1.620000] queenstown = [51.850334, -8.294286] m = folium.Map(location=iceburg, tiles="Stamen Terrain", zoom_start=3) tooltip = "Click me!" folium.Marker( southampton, popup="<h3>1. Southampton, 10 April 1912 </h3> <i> Titanic successfully arrives at Southampton shortly after midnight</i>", tooltip=tooltip, ).add_to(m) folium.Marker( cherbourg, popup="<h3>2.Cherbourg, 10 April 1912 </h1>", tooltip=tooltip ).add_to(m) folium.Marker( queenstown, popup="<h3>3.Queenstown, 11 April 1912 </h1> ", tooltip=tooltip ).add_to(m) # https://latitude.to/articles-by-country/general/942/sinking-of-the-rms-titanic folium.Marker( iceburg, popup="<h3>4. Crash - 15 April 1912 </h3>", tooltip=tooltip, icon=folium.Icon(color="red", icon="info-sign"), ).add_to(m) folium.PolyLine([southampton, cherbourg], fill_color="red").add_to(m) folium.PolyLine([cherbourg, queenstown], fill_color="red").add_to(m) folium.PolyLine([queenstown, iceburg], fill_color="red").add_to(m) m plt.rcParams["figure.figsize"] = (7, 7) wordcloud = WordCloud( stopwords=STOPWORDS, background_color="white", width=1000, height=1000, max_words=200, ).generate(" ".join(df["name"])) plt.imshow(wordcloud) plt.axis("off") plt.title("Passenger Names", fontsize=10) plt.show()
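# The EDA / modelling section that follows calls several names whose import cell is not
# shown (sns, plt, norm, stats, ols, sm, StandardScaler, skew, boxcox1p, the sklearn
# models, xgb, lgb, the Keras helpers, ...). A plausible import cell, inferred from those
# calls, is sketched here; it is an assumption, not the author's original cell.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm, skew
from scipy import stats
from scipy.special import boxcox1p
import statsmodels.api as sm
from statsmodels.formula.api import ols
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score, KFold, train_test_split
from sklearn.linear_model import ElasticNet, Lasso
from sklearn.kernel_ridge import KernelRidge
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin, clone
import xgboost as xgb
import lightgbm as lgb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, BatchNormalization
from tensorflow.keras import optimizers
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor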
sns.set_style("darkgrid") pd.set_option("display.max_columns", 500) train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv") train.head() train.info() train.describe() # # Checking # of NA # tmp = pd.isnull(train).sum() # for i in range(len(tmp)): # if tmp[i] != 0: # print('%s %s %d %s (%.2f%s)'%(tmp.index[i],' '*(20-len(tmp.index[i])), # tmp[i], ' '*(6 - len(str(tmp[i]))), # tmp[i]/len(train)*100, '%')) # Checking NA's tmp = pd.isnull(train).sum() tmp = tmp.sort_values(ascending=False) tmp = tmp[tmp > 0] plt.xticks(rotation=90) sns.barplot(x=tmp.index, y=tmp) plt.show() tmp = pd.isnull(test).sum() tmp = tmp.sort_values(ascending=False) tmp = tmp[tmp > 0] plt.xticks(rotation=90) sns.barplot(x=tmp.index, y=tmp) plt.show() # Categorizing variables num_col = [] cat_col = [] for column in train.columns[~pd.Series(train.columns).isin(["Id", "SalePrice"])]: if type(train[column][0]) is not str: num_col.append(column) else: cat_col.append(column) tmp = [ "MSSubClass", "Alley", "FireplaceQu", "PoolQC", "Fence", "MiscFeature", "MoSold", ] # Handwork ! num_col = list(set(num_col) - set(tmp)) cat_col += tmp for col in tmp: train[col] = train[col].astype(str) # ### Target Variable # skew and log transformation for the target variable fig, ax = plt.subplots(1, 2, figsize=(10, 5)) print("Before > Mean: %f, Standard Deviation: %f" % norm.fit(train["SalePrice"])) sns.distplot(train["SalePrice"], ax=ax[0]) stats.probplot(train["SalePrice"], plot=ax[1]) plt.show() # after log transformation to the target var fig, ax = plt.subplots(1, 2, figsize=(10, 5)) tmp = np.log1p(train["SalePrice"]) # log(1+x) print("After > Mean: %f, Standard Deviation: %f" % norm.fit(tmp)) sns.distplot(tmp, ax=ax[0]) stats.probplot(tmp, plot=ax[1]) plt.show() # vars' corr with the target var col_corr = [] for col in num_col: na_idx = pd.isnull(train[col]) corr = np.corrcoef(x=train[col][~na_idx], y=train["SalePrice"][~na_idx])[0, 1] col_corr.append((col, corr)) col_corr.sort(key=lambda x: -abs(x[1])) col_corr # Top N's scatter plots N = 16 top_N_num = [set[0] for set in col_corr[:N]] fig, ax = plt.subplots(int(np.ceil(N / 2)), 2, figsize=(20, N * 2)) for i, col in enumerate(top_N_num): sns.scatterplot( data=train, x=col, y="SalePrice", alpha=0.4, color="red", ax=ax[i // 2][i % 2] ) ax[i // 2][i % 2].set_xlabel(col, fontsize=18) ax[i // 2][i % 2].set_ylabel("SalePrice", fontsize=18) plt.show() # pairplot: top 5 highest correlated vars and target var N = 5 top_N_num = [set[0] for set in col_corr[:N]] top_N_num.append("SalePrice") plt.figure(figsize=(10, 8)) sns.pairplot(train[top_N_num]) plt.show() # top N's lowest P-value variables col_aov = [] for col in cat_col: result = ols("SalePrice ~ {}".format(col), data=train).fit() aov_table = sm.stats.anova_lm(result) aov_pr = aov_table["PR(>F)"][0] col_aov.append((col, aov_pr)) col_aov.sort(key=lambda x: x[1]) # ascending order of p-value col_aov # Box plot of Top N Categorical Vars N = 16 top_N_num = [set[0] for set in col_aov[:N]] fig, ax = plt.subplots(int(np.ceil(N / 2)), 2, figsize=(20, N * 2)) plt.setp(ax[0][0].get_xticklabels(), rotation=45) for i, col in enumerate(top_N_num): sns.boxplot(x=col, y="SalePrice", data=train, ax=ax[i // 2][i % 2]) ax[i // 2][i % 2].set_xlabel(col, fontsize=18) ax[i // 2][i % 2].set_ylabel("SalePrice", fontsize=18) plt.show() # ### Independant Variables # Correlation of each other plt.figure(figsize=(10, 8)) corr_table = 
train[num_col].corr() sns.heatmap(corr_table) plt.show() # N highest corr pairs high_cor_list = [] tmp = corr_table[abs(corr_table) > 0.4] for col in tmp.columns: for row in tmp[col][~pd.isnull(tmp[col])].index: if col == row: break high_cor_list.append((col, row, tmp[col][row])) high_cor_list.sort(key=lambda x: -x[2]) high_cor_list # Correlation of each other plt.figure(figsize=(10, 8)) corr_table = test[num_col].corr() sns.heatmap(corr_table) plt.show() # N highest corr pairs high_cor_list = [] tmp = corr_table[abs(corr_table) > 0.4] for col in tmp.columns: for row in tmp[col][~pd.isnull(tmp[col])].index: if col == row: break high_cor_list.append((col, row, tmp[col][row])) high_cor_list.sort(key=lambda x: -x[2]) high_cor_list # ### Outlier # outlier found in the above graph sns.scatterplot(data=train, x="GrLivArea", y="SalePrice") plt.show() # 2 ourliers at the bottom right # outlier checking with univariate anlaysis scaler = StandardScaler() tmp = scaler.fit_transform(train[["SalePrice"]]) tmp_sorted = sorted(np.squeeze(tmp)) for a, b in zip(tmp_sorted[:10], tmp_sorted[-10:]): print("{} {} {}".format(round(a, 5), " " * 10, round(b, 5))) train[tmp > 7] # Be careful of these! # ### Checking Homoscedascity skews = [] for col in num_col: skews.append((col, skew(train[col]))) skews.sort(key=lambda x: -abs(x[1])) skews sns.distplot(train["TotalBsmtSF"]) plt.xlim(0, 10e03) plt.show() # After log transformation on skewed variables( 1. GrLivArea) fig, ax = plt.subplots(1, 2, figsize=(15, 5)) dat = train.copy() dat["SalePrice"] = np.log1p(dat["SalePrice"]) dat["GrLivArea"] = np.log1p(dat["GrLivArea"]) dat["MasVnrArea"] = np.log1p(dat["MasVnrArea"]) sns.scatterplot(data=dat, x="GrLivArea", y="SalePrice", ax=ax[0]) sns.scatterplot(data=dat, x="GarageArea", y="SalePrice", ax=ax[1]) plt.show() # ### Train VS Test fig, ax = plt.subplots(20, 2, figsize=(20, 80)) for i, col in enumerate(num_col): row = int(i / 2) sns.distplot(train[col][~pd.isnull(train[col])], ax=ax[row][0]) sns.distplot(test[col][~pd.isnull(test[col])], ax=ax[row][1]) plt.show() # ### Preprocessing train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv") # Removing Outliers train = train.drop( train[(train["GrLivArea"] > 4000) & (train["SalePrice"] < 200000)].index ) # Removing sns.scatterplot(data=train, x="GrLivArea", y="SalePrice") # After plt.show() # Combining test["SalePrice"] = 0 data = pd.concat([train, test], axis=0) data.reset_index(drop=True, inplace=True) # NA Handling data["MSZoning"].fillna(data["MSZoning"].mode()[0], inplace=True) data["Utilities"].fillna(data["Utilities"].mode()[0], inplace=True) data["Exterior1st"].fillna(data["Exterior1st"].mode()[0], inplace=True) data["Exterior2nd"].fillna(data["Exterior2nd"].mode()[0], inplace=True) data["MasVnrType"].fillna("None", inplace=True) data["MasVnrArea"].fillna(0, inplace=True) data["BsmtQual"].fillna("None", inplace=True) data["BsmtCond"].fillna("None", inplace=True) data["BsmtExposure"].fillna("None", inplace=True) data["BsmtFinType1"].fillna("None", inplace=True) data["BsmtFinSF1"].fillna(0, inplace=True) data["BsmtFinType2"].fillna("None", inplace=True) data["BsmtFinSF2"].fillna(0, inplace=True) data["BsmtUnfSF"].fillna(0, inplace=True) data["TotalBsmtSF"].fillna(0, inplace=True) data["Electrical"].fillna(data["Electrical"].mode()[0], inplace=True) data["BsmtFullBath"].fillna(0, inplace=True) data["BsmtHalfBath"].fillna(0, inplace=True) 
data["KitchenQual"].fillna(data["KitchenQual"].mode()[0], inplace=True) data["Functional"].fillna("None", inplace=True) data["GarageType"].fillna("None", inplace=True) data["GarageFinish"].fillna("None", inplace=True) data["GarageYrBlt"].fillna(data["GarageYrBlt"].mode()[0], inplace=True) data["GarageArea"].fillna(0, inplace=True) data["GarageCars"].fillna(0, inplace=True) data["GarageQual"].fillna("None", inplace=True) data["SaleType"].fillna(data["SaleType"].mode()[0], inplace=True) data["GarageCond"].fillna("None", inplace=True) data["PoolQC"].fillna("None", inplace=True) data["Fence"].fillna("None", inplace=True) data["MiscFeature"].fillna("None", inplace=True) data["LotFrontage"].fillna(0, inplace=True) data["Alley"].fillna("None", inplace=True) data["FireplaceQu"].fillna("None", inplace=True) # Dropping columns with too many NAs cols = [ "MiscFeature", "Alley", "PoolQC", "Fence", "FireplaceQu", "GarageCond", "LotFrontage", ] data.drop(cols, axis=1, inplace=True) # Adding/Revising Columns # Num Columns data["Total_SF"] = data["TotalBsmtSF"] + data["1stFlrSF"] + data["2ndFlrSF"] data["Total_Bath"] = ( data["BsmtFullBath"] + data["BsmtHalfBath"] * 0.5 + data["FullBath"] + data["HalfBath"] * 0.5 ) data["Total_Footage"] = ( data["BsmtFinSF1"] + data["BsmtFinSF2"] + data["1stFlrSF"] + data["2ndFlrSF"] ) data["Age"] = 2020 - data["YearBuilt"] data["Age_rmd"] = 2020 - data["YearRemodAdd"] data["Age_Garage"] = 3020 - data["GarageYrBlt"] data["qul_grliv"] = data["OverallQual"] * data["GrLivArea"] data["garage"] = data["GarageCars"] * data["GarageArea"] data["Years"] = data["YearBuilt"] + data["YearRemodAdd"] + data["GarageYrBlt"] data["Age_not_rmd"] = data["Age"] - data["Age_rmd"] data["Overall"] = data["OverallQual"] + data["OverallCond"] data["porch"] = data["OpenPorchSF"] + data["EnclosedPorch"] + data["ScreenPorch"] data["haspool"] = data["PoolArea"].apply(lambda x: 1 if x > 0 else 0) data["has2ndfloor"] = data["2ndFlrSF"].apply(lambda x: 1 if x > 0 else 0) data["hasgarage"] = data["GarageArea"].apply(lambda x: 1 if x > 0 else 0) data["hasbsmt"] = data["TotalBsmtSF"].apply(lambda x: 1 if x > 0 else 0) data["hasfireplace"] = data["Fireplaces"].apply(lambda x: 1 if x > 0 else 0) # Cat Columns # data['Ext_Kit'] = data['ExterQual'] + data['KitchenQual'] # data['Bsm_Heat'] = data['BsmtQual'] + data['HeatingQC'] # data['Gara_TP1'] = data['GarageFinish'] + data['BsmtFinType1'] data["Season"] = data["MoSold"].apply( lambda x: "Spring" if x <= 3 else ("Summer" if x <= 6 else ("Fall" if x <= 9 else "Winter")) ) data.drop(["MoSold"], axis=1, inplace=True) # Processing Types of Variables to_cat = ["MSSubClass"] data[to_cat] = data[to_cat].astype(str) # Dividing columsn into num and cat num_col = [] cat_col = [] for col in data.columns: if col == "Id" or col == "SalePrice": continue elif data[col].dtype == "object": cat_col.append(col) else: num_col.append(col) print(len(num_col)) print(len(cat_col)) print(len(data.columns)) # Label Encoding & Dummy # labels_col = ['LotShape', 'LandContour', 'LandSlope', 'HouseStyle', 'ExterQual', # 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', # 'BsmtFinType2', 'HeatingQC', 'CentralAir', 'KitchenQual', 'Functional', # 'GarageFinish', 'GarageQual', 'PavedDrive'] # for col in labels_col: # data[col] = pd.factorize(data[col])[0] # data['LotShape'] = data['LotShape'].apply(lambda x: {'IR3':0, 'IR2':1, 'IR1':2, 'Reg':3}[x]) # data['LandContour'] = data['LandContour'].apply(lambda x: {'Low':0, 'HLS':1, 'Bnk':2, 'Lvl':3}[x]) # data['LandSlope'] = 
data['LandSlope'].apply(lambda x: {'Gtl':0, 'Mod':1, 'Sev':2}[x]) # data['HouseStyle'] = data['HouseStyle'].apply(lambda x: {'1Story':0, '1.5Unf':1, '1.5Fin':2, # '2Story':3, '2.5Unf':4, '2.5Fin':5, # 'SFoyer':6, 'SLvl':7}[x]) # data['CentralAir'] = data['CentralAir'].apply(lambda x: {'N':0, 'Y':1}[x]) # data['Functional'] = data['Functional'].apply(lambda x: {'Sal':0, 'Sev':1, 'Maj2':2, 'Maj1':3, 'Mod':4, # 'Min2':5, 'Min1':6, 'Typ':7, 'None':-99}[x]) # data['BsmtExposure'] = data['BsmtExposure'].apply(lambda x: {'No':0, 'Mn':1, 'Av':2, 'Gd':3, 'None':-99}[x]) # data['GarageFinish'] = data['GarageFinish'].apply(lambda x: {'Unf':0, 'RFn':1, 'Fin':2, 'None':-99}[x]) # data['PavedDrive'] = data['PavedDrive'].apply(lambda x: {'N':0, 'P':1, 'Y':2}[x]) # for col in ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC', 'KitchenQual', 'GarageQual']: # data[col] = data[col].apply(lambda x: {'Po':0, 'Fa':1, 'TA':2, 'Gd':3, 'Ex':4, 'None':-99}[x]) # for col in ['BsmtFinType1', 'BsmtFinType2']: # data[col] = data[col].apply(lambda x: {'Unf':0, 'LwQ':1, 'Rec':2, 'BLQ':3, 'ALQ':4, 'GLQ':5,'None':-99}[x]) for col in cat_col: # for col in list(set(cat_col) - set(labels_col)): data = pd.concat([data, pd.get_dummies(data[col], prefix=(col))], axis=1) data.drop([col], axis=1, inplace=True) # Solving Skew Problem skews = [] for col in num_col: skews.append((col, skew(data[col]))) skews.sort(key=lambda x: -abs(x[1])) for col, value in skews: if abs(value) > 0.5: data[col] = boxcox1p(data[col], 0.15) # boxcox data["SalePrice"] = np.log1p(data["SalePrice"]) # log(x+1) ### Dropping columns that have high corr with other vars cols = ["GarageArea", "GarageYrBlt", "TotRmsAbvGrd", "2ndFlrSF", "BsmtFullBath"] data.drop(cols, axis=1, inplace=True) num_col = list(set(num_col) - set(cols)) # Dropping Sparse Columns cols = [] for col in data.columns: major_ratio = data[col].value_counts().iloc[0] / len(data[col]) if major_ratio > 0.999: cols.append(col) data.drop(cols, axis=1, inplace=True) print("# of columns dropped : {}".format(len(cols))) # PCA ! 
# pca = PCA() # # data[num_col] = pca.fit_transform(data[num_col]) # data[num_col[:4]] = pca.fit_transform(data[num_col])[:,:4] # data.drop(num_col[4:], axis=1, inplace=True) data.shape # Dividing data.reset_index(drop=True, inplace=True) train = data[data["Id"] <= 1460] test = data[data["Id"] > 1460] X = list(set(train.columns) - set(["SalePrice"])) y = "SalePrice" # ### Baseline Models def rmse_cv(model): result = cross_val_score( cv=5, estimator=model, X=train[X].values, y=train[y].values, scoring="neg_mean_squared_error", ) result = (-result) ** (1 / 2) return np.mean(result), np.std(result) # Scoring Options of 'cross_val_score' function: # 'accuracy', 'adjusted_mutual_info_score', 'adjusted_rand_score', 'average_precision', 'completeness_score', 'explained_variance', # 'f1', 'f1_macro', 'f1_micro', 'f1_samples', 'f1_weighted', 'fowlkes_mallows_score', 'homogeneity_score', 'mutual_info_score', # 'neg_log_loss', 'neg_mean_absolute_error', 'neg_mean_squared_error', 'neg_mean_squared_log_error', 'neg_median_absolute_error', # 'normalized_mutual_info_score', 'precision', 'precision_macro', 'precision_micro', 'precision_samples', 'precision_weighted', 'r2', # 'recall', 'recall_macro', 'recall_micro', 'recall_samples', 'recall_weighted', 'roc_auc', 'v_measure_score' def rmsle(pred, actual): result = 0 for i in range(len(pred)): result += (pred[i] - actual[i]) ** 2 result /= len(pred) return result ** (1 / 2) EN = make_pipeline( RobustScaler(), ElasticNet(alpha=1e-3, l1_ratio=0.7, max_iter=1e4) ) # alpha는 l1_ratio에 곱해짐 # EN = ElasticNet(alpha=1e-3, l1_ratio=0.8, max_iter=1e+04) rmse_cv(EN) # # Objective Function of ElasticNet # 1 / (2 * n_samples) * ||y - Xw||^2_2 # + alpha * l1_ratio * ||w||_1 # + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 lasso = make_pipeline(RobustScaler(), Lasso(alpha=5e-04, max_iter=1e04)) # lasso = Lasso(alpha=1e-04, max_iter=1e+04) rmse_cv(lasso) KRR = KernelRidge( alpha=1e-2, kernel="polynomial", degree=1, coef0=2 ) # kernel: 'linear', 'laplacian', 'rbf' rmse_cv(KRR) GBoost = GradientBoostingRegressor( n_estimators=3000, learning_rate=0.05, max_depth=4, max_features="sqrt", min_samples_leaf=30, min_samples_split=10, loss="huber", random_state=5, ) # rmse_cv(GBoost) # corr 높은 변수들 안잘라내니까 확실히 성능떨어짐 model_xgb = xgb.XGBRegressor( colsample_bytree=0.3, gamma=0.0468, learning_rate=0.05, max_depth=3, min_child_weight=1.3, n_estimators=1024, reg_alpha=0.3, reg_lambda=0.4, subsample=0.5, silent=1, random_state=7, nthread=-1, ) # rmse_cv(model_xgb) model_lgb = lgb.LGBMRegressor( objective="regression", num_leaves=4, learning_rate=0.05, n_estimators=1024, max_bin=40, bagging_fraction=0.9, bagging_freq=20, feature_fraction=0.1, # W 0.6 > 0.2 로 낮추니 엄청난효과! 
feature_fraction_seed=9, bagging_seed=9, min_data_in_leaf=2, min_sum_hessian_in_leaf=1, random_state=5, ) # rmse_cv(model_lgb) def baseline_model(): model = Sequential() model.add( Dense(64, input_dim=len(X), kernel_initializer="normal", activation="relu") ) # model.add(BatchNormalization()) model.add(Dense(8, input_dim=64, kernel_initializer="normal", activation="relu")) model.add(Dense(1, kernel_initializer="normal")) opt = optimizers.Adam(learning_rate=0.005) model.compile(loss="mean_squared_error", optimizer=opt) return model nn = KerasRegressor( build_fn=baseline_model, epochs=3000, batch_size=len(train), verbose=0 ) # rmse_cv(nn) rf = RandomForestRegressor( n_estimators=128, min_samples_split=4, min_samples_leaf=2, random_state=42 ) rmse_cv(rf) rf # # ### Meta Model class Meta_Regressor(BaseEstimator): def __init__(self, base_models, meta_models): self.base_models = base_models # self.A = B 에서 A와 B가 이름이 같아야한다.. 뭐지 self.meta_models = meta_models def fit(self, X, y): self.base_models_ = [[] for _ in self.base_models] self.meta_models_ = clone(self.meta_models) Kf = KFold(n_splits=5, shuffle=True, random_state=5) out_fold_pred = np.zeros((len(X), len(self.base_models))) for i, model in enumerate(self.base_models): for train_idx, val_idx in Kf.split(X): model = clone(self.base_models[i]) model.fit(X[train_idx], y[train_idx]) pred = model.predict(X[val_idx]) out_fold_pred[val_idx, i] = pred self.base_models_[i].append(model) self.meta_models_.fit(X=out_fold_pred, y=y) def predict(self, X): meta_features = np.column_stack( [ np.column_stack([model.predict(X) for model in sub_models]).mean(axis=1) for sub_models in self.base_models_ ] ) scores = self.meta_models_.predict(meta_features) return scores meta_regressor = Meta_Regressor( base_models=[lasso, KRR, GBoost, model_xgb, model_lgb], meta_models=EN ) rmse_cv(meta_regressor) meta_regressor = Meta_Regressor( base_models=[EN, KRR, GBoost, model_xgb, model_lgb], meta_models=lasso ) rmse_cv(meta_regressor) meta_regressor = Meta_Regressor( base_models=[lasso, KRR, GBoost, model_xgb, EN], meta_models=model_lgb ) rmse_cv(meta_regressor) # random_state해서 이제 결과가 완전히 동일하다 # 1. preprocessing에서 뉴컬럼 뺴고 다시 결과값 비교해보기 # 2. 1 결과를 보고 성능 올릴만한 좋은 방법 더 생각해보기 # 3. 앙상블도 밸리데이션 해보기 # ### Average Ensemble class Average_Ensemble(BaseEstimator, RegressorMixin, TransformerMixin): def __init__(self, models): self.models = models # self.A = B 에서 A와 B가 이름이 같아야한다.. 
뭐지 def fit(self, X, y): self.models_ = [clone(model) for model in self.models] for model in self.models_: model.fit(X, y) def predict(self, x): scores = np.column_stack([model.predict(x) for model in self.models_]) return scores.mean(axis=1) average_ensemble = Average_Ensemble(models=[EN, KRR, GBoost, model_xgb, model_lgb]) rmse_cv(average_ensemble) rmse_cv(average_ensemble) # ### Weighted Ensemble with Meta Model class Weighted_Ensemble(BaseEstimator, RegressorMixin, TransformerMixin): def __init__(self, models): self.models = models def fit(self, X, y): self.models_ = [clone(model) for model in self.models] for model in self.models_: model.fit(X, y) def predict(self, x): results = np.zeros(len(x)) scores = [model.predict(x) for model in self.models_] # for i, model in enumerate(scores): # results += scores[i] * self.weights[i] return scores meta_regressor = Meta_Regressor( base_models=[lasso, KRR, GBoost, model_xgb, model_lgb, nn, rf], meta_models=EN ) weighted_ensemble = Weighted_Ensemble( models=[lasso, KRR, GBoost, model_xgb, model_lgb, nn, rf, EN, meta_regressor] ) train_x, test_x, train_y, test_y = train_test_split( train[X].values, train[y].values, test_size=0.3, random_state=4 ) weighted_ensemble.fit(train_x, train_y) scores = weighted_ensemble.predict(test_x) weights = [0.1, 0.2, 0.2, 0.06, 0.09, 0.01, 0.04, 0.2, 0.1] # # lasso, KRR, GBoost, xgb, lgb, nn ,rf EN meta score = np.zeros(len(scores[0])) for i, model in enumerate(scores): score += scores[i] * weights[i] rmsle(score, test_y) meta_regressor = Meta_Regressor(base_models=[lasso, KRR, GBoost], meta_models=EN) weighted_ensemble = Weighted_Ensemble(models=[model_xgb, model_lgb, meta_regressor]) train_x, test_x, train_y, test_y = train_test_split( train[X].values, train[y].values, test_size=0.3, random_state=4 ) weighted_ensemble.fit(train_x, train_y) scores = weighted_ensemble.predict(test_x) weights = [0.05, 0.05, 0.9] score = np.zeros(len(scores[0])) for i, model in enumerate(scores): score += scores[i] * weights[i] rmsle(score, test_y) # ### Final Prediction # meta_ensemble meta_regressor = Meta_Regressor( base_models=[lasso, KRR, GBoost, model_xgb, model_lgb, nn, rf], meta_models=EN ) weighted_ensemble = Weighted_Ensemble(models=[KRR, GBoost, model_xgb, EN]) weighted_ensemble.fit(train[X].values, train[y].values) weights = [0.2, 0.2, 0.05, 0.55] # scores = weighted_ensemble.predict(test[X].values) score = np.zeros(len(scores[0])) for i, model in enumerate(scores): score += scores[i] * weights[i] pred = np.expm1(score) # meta_regerssor meta_regressor = Meta_Regressor( base_models=[lasso, KRR, GBoost, model_xgb, model_lgb], meta_models=EN ) # the model with the highest Val score meta_regressor.fit(train[X].values, train[y].values) pred = np.expm1(meta_regressor.predict(test[X].values)) submission = pd.DataFrame({"Id": range(1461, 2920), "SalePrice": pred}) submission.to_csv("submission.csv", index=False)
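# For comparison, scikit-learn (>= 0.22) ships a built-in stacking implementation that
# follows the same out-of-fold idea as the Meta_Regressor class above. This is only a
# sketch reusing the base models and the ElasticNet meta-model defined earlier, not a
# tuned drop-in replacement.
from sklearn.ensemble import StackingRegressor

stack = StackingRegressor(
    estimators=[
        ("lasso", lasso),
        ("krr", KRR),
        ("gboost", GBoost),
        ("xgb", model_xgb),
        ("lgb", model_lgb),
    ],
    final_estimator=EN,
    cv=5,
)
# rmse_cv(stack)  # can be evaluated exactly like the custom Meta_Regressor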
# # Explanation # > This data includes 9 Column # > But variable values not enough. For example ther's not Player Shoot Power, PointCount,AssistCount etc... Therefore some analiysing will be short. # 1. We will firstly study the columns and data # 1. Something analysing with using pandas libraray and we will know pandas more some # 1. After then basic visualization with seaborn library # 1. And lastly we will a good visualization with Pyplot library # > So let's start # [1.Reading data and basicly studying](#1) # [2.Analysing with Pandas](#2) # [3.Basic Visualization with Seaborn](#3) # [4.Last Visualization with PyPlot](#4) # # ## Reading data and basicly studying import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # import plotly.plotly as py from plotly.offline import init_notebook_mode, iplot init_notebook_mode(connected=True) import plotly.graph_objs as go from wordcloud import WordCloud import matplotlib.pyplot as plt import seaborn as sns # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. nbadata = pd.read_csv( "../input/nba-all-star-game-20002016/NBA All Stars 2000-2016 - Sheet1.csv" ) nbadata.head() nbadata.info() # we have to fix it already if used object columns instead of int or float nbadata.describe() # achives istatistical data ---> ".describe()" --> not a required method yet :) # We'll don't use Selection Type column. Therefore will drop that. nbadata.drop(["Selection Type"], axis=1, inplace=True) nbadata.head() # # ## Analysing with Pandas # Which positions is there in data nbadata.Pos.value_counts() # Filtering DataFrame JamesHarden = nbadata[(nbadata.Player == "James Harden") & (nbadata.Year == 2014)] JamesHarden # > using function shoulnd't be with "def" everytime # for i in age: # print(2020-i) # # OR # [2020-i for i in age] # # > I cant use inch instead of meters OR again pounds instead of kilogram. Therefore i will conversion values in dataframe. # Pounds to -----> KG WTList = list(nbadata["WT"]) KGList = [round(float(i) * 0.45, 1) for i in WTList] # KGList nbadata["KGValues"] = KGList nbadata.head() # I could use apply() method but i like in this way. 
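# The same pounds-to-kg conversion written with apply(), as the comment above mentions
# (equivalent to the list-comprehension version already applied to the data):
nbadata["KGValues"] = nbadata["WT"].apply(lambda w: round(float(w) * 0.45, 1))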
# Feet and Inch to Cm HTData = list(nbadata["HT"]) CmData = [ round((float(HTData[i][0]) * 12 + float(HTData[i][2])) * 2.54, 1) for i in range(0, len(HTData)) ] # CmData nbadata["CmValues"] = CmData nbadata.head() # Find ratio of cmValues more then 200 to less then 200 in percent MoreThen200 = len(nbadata[nbadata.CmValues > 200]) LessThen200 = len(nbadata[nbadata.CmValues < 200]) # MoreThen200 # LessThen200 # we can look results if we want Ratio = round((MoreThen200 * 100) / LessThen200, 3) Ratio # we find as a result %68 # # ## Visualization with Seaborn Teamsin2016 = nbadata[nbadata.Year == 2016].Team.value_counts() Teamsin2016 Teamsin2016 = list(nbadata[nbadata.Year == 2016].Team.value_counts()) Teamsin2016 # Visualizate Teams Count of NbaAllStar2016 teamdata = list(nbadata[nbadata.Year == 2016].Team.unique()) teamdata # ### Bar Plot data = pd.DataFrame({"team_name_in2016allstar": teamdata, "count_of_team": Teamsin2016}) # visualization plt.figure(figsize=(12, 8)) sns.barplot(x=data.team_name_in2016allstar, y=data.count_of_team) plt.xticks(rotation=90) plt.xlabel("Team Names") plt.ylabel("Count of Teams") plt.title("Team Name in 2016 All Star") plt.show() # ### Pie Plot labels = teamdata explode = np.zeros(18, dtype=int) sizes = Teamsin2016 plt.figure(figsize=(7, 7)) plt.pie(sizes, explode=explode, labels=labels, autopct="%1.1f%%") plt.title("Team Counts Ratio in 2016 All Star", color="blue", fontsize=15) plt.show() # ### Point Plot # > Actually not more fit Point Plot for this data. But I will for repeat and more learn # Team counts of player between of 2010 and 2016 year_list0 = list(nbadata.Year.unique()) year_list = year_list0[:7] year_list = year_list[::-1] hawks = [] lakers = [] bulls = [] clippers = [] clevland = [] for i in year_list: x = nbadata[nbadata.Year == i] hawks.append(len(x[x.Team == "Atlanta Hawks"])) lakers.append(len(x[x.Team == "Los Angeles Lakers"])) bulls.append(len(x[x.Team == "Chicago Bulls"])) clippers.append(len(x[x.Team == "Los Angeles Clippers"])) clevland.append(len(x[x.Team == "Cleveland Cavaliers"])) df = pd.DataFrame( { "Atlanta Hawks": hawks, "Los Angeles Lakers": lakers, "Chicago Bulls": bulls, "Los Angeles Clippers": clippers, "Cleveland Cavaliers": clevland, "Year": year_list, } ) # Visualization f, ax1 = plt.subplots(figsize=(16, 10)) sns.pointplot(x="Year", y="Atlanta Hawks", data=df, color="orange", alpha=0.9) sns.pointplot(x="Year", y="Los Angeles Lakers", data=df, color="yellow", alpha=0.9) sns.pointplot(x="Year", y="Chicago Bulls", data=df, color="blue", alpha=0.4) sns.pointplot(x="Year", y="Los Angeles Clippers", data=df, color="grey", alpha=0.8) sns.pointplot(x="Year", y="Cleveland Cavaliers", data=df, color="purple", alpha=0.9) plt.text(0, 4, "Atlanta Hawks", color="orange", fontsize=17, style="italic") plt.text(0, 3.8, "Los Angeles Lakers", color="yellow", fontsize=17, style="italic") plt.text(0, 3.6, "Chicago Bulls", color="blue", fontsize=17, style="italic") plt.text(0, 3.4, "Los Angeles Clippers", color="grey", fontsize=17, style="italic") plt.text(0, 3.2, "Cleveland Cavaliers", color="purple", fontsize=17, style="italic") plt.xlabel("Years") plt.ylabel("Players Count in Team") plt.title("Players Count in Team Between of 2010 and 2016", fontsize=20) plt.grid() plt.show() # ### Joint Plot # sns.jointplot("Year", "Atlanta Hawks", data=df, color="orange", ratio=2) sns.jointplot("Year", "Los Angeles Lakers", data=df, color="red", ratio=2) plt.show()
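# One caveat about the 2016 bar/pie plots above: Team.value_counts() is sorted by
# frequency while Team.unique() is in order of first appearance, so zipping them into a
# DataFrame can attach counts to the wrong team names. Deriving both names and counts
# from the same value_counts() keeps them aligned (sketch):
counts_2016 = nbadata[nbadata.Year == 2016].Team.value_counts()
data = pd.DataFrame(
    {"team_name_in2016allstar": counts_2016.index, "count_of_team": counts_2016.values}
)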
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import pandas_profiling as pp df = pd.read_csv("/kaggle/input/wine-quality-dataset/WineQT.csv") df.head() # Removing Id as it wont help in Model building df.pop("Id") df.head() df.describe() df.info() # Using Pandas Profiling pp.ProfileReport(df) # Using Pairplot to get knowledge about data sns.pairplot(df, hue="quality", height=3) # Removing duplicate values from the dataframe df.drop_duplicates() # Plotting Histograms instead of Boxplots to get visual intuition of outliers as data is less sns.histplot(x=df["fixed acidity"]) sns.histplot(x=df["volatile acidity"]) sns.histplot(x=df["citric acid"]) sns.histplot(x=df["residual sugar"]) sns.histplot(x=df["chlorides"]) # Removing Outliers df.drop(df[df["chlorides"] >= 0.3].index, inplace=True) df.describe() df.info() # Removing Outliers df.drop(df[df["residual sugar"] >= 10].index, inplace=True) sns.histplot(x=df["residual sugar"]) df.info() sns.histplot(x=df["free sulfur dioxide"]) sns.histplot(x=df["total sulfur dioxide"]) # Removing Outliers df.drop(df[df["total sulfur dioxide"] >= 250].index, inplace=True) df.drop(df[df["free sulfur dioxide"] >= 60].index, inplace=True) df.info() sns.histplot(x=df["density"]) sns.histplot(x=df["pH"]) sns.histplot(x=df["sulphates"]) # Removing Outliers df.drop(df[df["sulphates"] >= 1.5].index, inplace=True) df.info() sns.histplot(x=df["alcohol"]) # # **Model Building** # Removing features with low correlation with respect to Quality since it wont help in model building df.corr() df.pop("pH") df.pop("residual sugar") df.pop("free sulfur dioxide") df.corr() from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split Y = df.pop("quality") X = df X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25) X_train Y_train # ## **Random Forest** rf = RandomForestRegressor(max_depth=15, n_estimators=80) rf.fit(X_train, Y_train) rf.score(X_train, Y_train) rf.score(X_test, Y_test)
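# Note on the duplicate-removal step above: drop_duplicates() returns a new frame and
# leaves df unchanged, so the duplicate rows were still present downstream. Assigning the
# result back at that point actually removes them (sketch of the corrected step):
df = df.drop_duplicates()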
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import numpy as np import pandas as pd import matplotlib.pyplot as plt import os wd = "/kaggle/input/jamp-hackathon-drive-1/train_set/" images_dir = os.listdir(wd) data = [] for i in images_dir: path = os.path.join(wd, i) path_list = os.listdir(path) for j in path_list: data.append(os.path.join(path, j)) df = pd.DataFrame() df["Images"] = data classes = df["Images"].str.split("/", n=6, expand=True)[5] df["Label"] = classes df = df.sample(frac=1).reset_index(drop=True) df.head() df["Label"].value_counts() ## train test split from sklearn.model_selection import train_test_split train, test = train_test_split(df, test_size=0.2, stratify=df["Label"]) sample = plt.imread(train["Images"].iloc[0]) plt.imshow(sample) sample.shape # importing required libraries from keras.models import Sequential get_ipython().magic("matplotlib inline") import matplotlib.pyplot as plt import keras from keras.layers import Dense from keras.applications.vgg16 import VGG16 from keras.preprocessing import image from keras.applications.vgg16 import preprocess_input from keras.applications.vgg16 import decode_predictions from tqdm import tqdm import pickle train_img = [] for i in tqdm(df["Images"]): temp_img = image.load_img(i, target_size=(224, 224)) temp_img = image.img_to_array(temp_img) train_img.append(temp_img) # converting train images to array and applying mean subtraction processing train_img = np.array(train_img) train_img = preprocess_input(train_img) test_wd = "/kaggle/input/jamp-hackathon-drive-1/test_set/" test_dir = os.listdir(test_wd) test_data = [] for i in test_dir: test_data.append(os.path.join(test_wd, i)) test_df = pd.DataFrame() test_df["Images"] = test_data test_df.head() test_img = [] for i in tqdm(test_df["Images"]): temp_img = image.load_img(i, target_size=(224, 224)) temp_img = image.img_to_array(temp_img) test_img.append(temp_img) test_img = np.array(test_img) test_img = preprocess_input(test_img) model = VGG16(weights="imagenet", include_top=False) train_img.shape, test_img.shape features_train = model.predict(train_img) features_test = model.predict(test_img) features_train.shape, features_test.shape train_x = features_train.reshape(1027, -1) test_x = features_test.reshape(256, -1) # converting target variable to array train_y = np.asarray(df["Label"]) # performing one-hot encoding for the target variable train_y = pd.get_dummies(train_y) train_y = np.array(train_y) # creating training and validation set from sklearn.model_selection import train_test_split X_train, X_valid, Y_train, Y_valid = train_test_split( train_x, train_y, test_size=0.3, random_state=42 ) test_y = np.asarray(test["Label"]) test_y = pd.get_dummies(test_y) test_y = np.array(test_y) train_x.shape, train_y.shape, test_x.shape, test_y.shape # creating a mlp model from keras.layers import Dense, Activation model = Sequential() model.add(Dense(1000, input_dim=25088, activation="relu", kernel_initializer="uniform")) keras.layers.core.Dropout(0.3, noise_shape=None, seed=None) model.add(Dense(500, input_dim=1000, activation="sigmoid")) 
# Dropout layers must be added to the model with model.add() to take effect
from keras.layers import Dropout

model.add(Dropout(0.4))
model.add(Dense(150, input_dim=500, activation="sigmoid"))
model.add(Dropout(0.2))
model.add(Dense(units=2))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(
    X_train, Y_train, epochs=20, batch_size=128, validation_data=(X_valid, Y_valid)
)
model.predict(X_valid)
model.evaluate(X_valid, Y_valid)
scores = model.evaluate(test_x, test_y)
print(f"Accuracy is {scores[1]*100} %")
output = np.argmax(model.predict(test_x), axis=1)
submission = pd.DataFrame()
submission["name"] = (
    test_df["Images"]
    .str.split("/", n=6, expand=True)[5]
    .str.split(".", n=6, expand=True)[0]
)
submission["class"] = output
submission.head()
submission.to_csv("submission.csv", index=False)
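# pickle is imported earlier but never used; a minimal sketch of caching the extracted VGG16
# features so they don't have to be recomputed on every run. The file name is my own choice,
# and features_train / features_test are assumed from the feature-extraction step above.
import pickle

# cache the extracted VGG16 features to disk
with open("vgg16_features.pkl", "wb") as f:
    pickle.dump({"train": features_train, "test": features_test}, f)

# reload them later instead of re-running model.predict on the images
with open("vgg16_features.pkl", "rb") as f:
    cached = pickle.load(f)
features_train, features_test = cached["train"], cached["test"]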
from matplotlib import pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import PIL.Image as Image, PIL.ImageDraw as ImageDraw, PIL.ImageFont as ImageFont import random import os import cv2 import gc from tqdm.auto import tqdm import numpy as np import keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Flatten from tensorflow.keras.layers import Conv2D, MaxPooling2D from tensorflow.keras.optimizers import SGD from tensorflow.keras.layers import BatchNormalization from tensorflow.keras.layers import LeakyReLU from tensorflow.keras.models import clone_model from sklearn.model_selection import train_test_split from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.utils import plot_model import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import numpy as np import datetime as dt for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) train_data = pd.read_csv("/kaggle/input/bengaliai-cv19/train.csv") train_data = pd.merge( pd.read_parquet(f"/kaggle/input/bengaliai-cv19/train_image_data_0.parquet"), train_data, on="image_id", ).drop(["image_id"], axis=1) train_labels = train_data[ ["grapheme_root", "vowel_diacritic", "consonant_diacritic", "grapheme"] ] train_data = train_data.drop( ["grapheme_root", "vowel_diacritic", "consonant_diacritic", "grapheme"], axis=1 ) def resize(df, size=64, need_progress_bar=True): resized = {} for i in range(df.shape[0]): image = cv2.resize(df.loc[df.index[i]].values.reshape(137, 236), (size, size)) resized[df.index[i]] = image.reshape(-1) resized = pd.DataFrame(resized).T return resized train_data = resize(train_data) / 255 train_data = train_data.values.reshape(-1, 64, 64, 1) model_dict = { "grapheme_root": Sequential(), "vowel_diacritic": Sequential(), "consonant_diacritic": Sequential(), } for model_type, model in model_dict.items(): model.add(Conv2D(64, 7, activation="relu", padding="same", input_shape=[64, 64, 1])) model.add(layers.BatchNormalization(momentum=0.15)) model.add(MaxPooling2D(2)) model.add(Conv2D(128, 3, activation="relu", padding="same")) model.add(Conv2D(128, 3, activation="relu", padding="same")) model.add(MaxPooling2D(2)) model.add(Conv2D(256, 3, activation="relu", padding="same")) model.add(Conv2D(256, 3, activation="relu", padding="same")) model.add(MaxPooling2D(2)) model.add(Flatten()) model.add(Dense(1024, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(512, activation="relu")) model.add(Dropout(0.5)) if model_type == "grapheme_root": model.add(layers.Dense(168, activation="softmax", name="root_out")) elif model_type == "vowel_diacritic": model.add(layers.Dense(11, activation="softmax", name="vowel_out")) elif model_type == "consonant_diacritic": model.add(layers.Dense(7, activation="softmax", name="consonant_out")) model.compile( optimizer="adam", loss=["categorical_crossentropy"], metrics=["accuracy"] ) batch_size = 32 epochs = 5 history_list = [] model_types = ["grapheme_root", "vowel_diacritic", "consonant_diacritic"] for target in model_types: Y_train = train_labels[target] Y_train = pd.get_dummies(Y_train).values x_train, x_test, y_train, y_test = train_test_split( train_data, Y_train, test_size=0.1, random_state=123 ) datagen = ImageDataGenerator() datagen.fit(x_train) history = 
model_dict[target].fit_generator( datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs, validation_data=(x_test, y_test), ) history_list.append(history) plt.figure() for i in range(3): plt.plot( np.arange(0, epochs), history_list[i].history["accuracy"], label="train_accuracy", ) plt.plot( np.arange(0, epochs), history_list[i].history["val_accuracy"], label="val_accuracy", ) plt.title(model_types[i]) plt.xlabel("Epoch #") plt.ylabel("Accuracy") plt.legend(loc="lower right") plt.show() preds_dict = {"grapheme_root": [], "vowel_diacritic": [], "consonant_diacritic": []} target = [] # model predictions placeholder row_id = [] # row_id place holder for i in range(4): print("Parquet: {}".format(i)) df_test_img = pd.read_parquet( "/kaggle/input/bengaliai-cv19/test_image_data_{}.parquet".format(i) ) df_test_img.set_index("image_id", inplace=True) X_test = resize(df_test_img, need_progress_bar=False) / 255 X_test = X_test.values.reshape(-1, 64, 64, 1) for i, p in preds_dict.items(): preds = model_dict[i].predict(X_test) preds_dict[i] = np.argmax(preds, axis=1) for k, id in enumerate(df_test_img.index.values): for i, comp in enumerate(model_types): id_sample = id + "_" + comp row_id.append(id_sample) target.append(preds_dict[comp][k]) del df_test_img del X_test gc.collect() df_sample = pd.DataFrame( {"row_id": row_id, "target": target}, columns=["row_id", "target"] ) df_sample.to_csv("submission.csv", index=False) df_sample.head()
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# Any results you write to the current directory are saved as output.
# Read csv
world_location_dataset = pd.read_csv("../input/world-cities-datasets/worldcities.csv")
world_location_dataset
# Get US state
us_location_dataset = world_location_dataset[world_location_dataset["iso2"] == "US"]
us_location_dataset
# Get top 15 cities based on index of US state
selected_cities_location = us_location_dataset.head(15)
selected_cities_location
# for each city, get the latitude and longitude only
selected_cities_location = selected_cities_location[["city", "lat", "lng"]]
selected_cities_location = selected_cities_location.reset_index()
selected_cities_location


# Calculate the distance between each pair of cities (plain Euclidean distance on lat/lng)
def distance(x1, y1, x2, y2):
    return round(((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5, 2)


# Create distance matrix (symmetric, with zeros on the diagonal)
dmat = np.zeros((15, 15))
for i in range(15):
    for j in range(i, 15):
        if i == j:
            continue
        x1, y1 = selected_cities_location.loc[i, ["lat", "lng"]]
        x2, y2 = selected_cities_location.loc[j, ["lat", "lng"]]
        calc = distance(x1, y1, x2, y2)
        dmat[i][j] = calc
        dmat[j][i] = calc

# Visualization Distance Matrix
dmat_df = pd.DataFrame(dmat)
dmat_df
# Visualization
fig, ax = plt.subplots()
ax.scatter(selected_cities_location["lat"], selected_cities_location["lng"])
for i, txt in enumerate(selected_cities_location["city"]):
    ax.annotate(
        txt, (selected_cities_location["lat"][i], selected_cities_location["lng"][i])
    )
ax.title.set_text("15 US Cities")
ax.set_xlabel("Latitude")
ax.set_ylabel("Longitude")
# Ref: https://stackoverflow.com/questions/14432557/matplotlib-scatter-plot-with-different-text-at-each-data-point
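# Euclidean distance on raw latitude/longitude is only a rough approximation; for actual
# geographic distances the haversine formula is more appropriate. A minimal sketch under that
# assumption - the function name and the 6371 km mean Earth radius are my own choices here.
import math


def haversine_km(lat1, lon1, lat2, lon2):
    # great-circle distance between two (lat, lon) points in kilometres
    r = 6371.0  # mean Earth radius in km
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dp = math.radians(lat2 - lat1)
    dl = math.radians(lon2 - lon1)
    a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))


# example: great-circle distance between the first two selected cities
x1, y1 = selected_cities_location.loc[0, ["lat", "lng"]]
x2, y2 = selected_cities_location.loc[1, ["lat", "lng"]]
print(round(haversine_km(x1, y1, x2, y2), 2), "km")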
# # Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Read the data and prepare it for further analysis
df = pd.read_csv("../input/lending-club/accepted_2007_to_2018Q4.csv.gz")
# df.head()
with pd.option_context("display.max_columns", None):
    display(df.head())
df.info()
# Clean the data
# Columns where all values are unique can be dropped right away,
# and grade is already reflected in sub_grade
df = df.drop(
    [
        "id",
        "policy_code",
        "out_prncp",
        "out_prncp_inv",
        "url",
        "pymnt_plan",
        "hardship_flag",
        "grade",
    ],
    axis=1,
)
# Columns with too many missing values are dropped.
# To keep the classification task simple,
# a column must be at least 90% non-null,
# so only about 50 columns are kept.
# ((df.isnull().sum()) / len(df) * 100).plot.bar(title='Percentage of missing values')
percent = df.isnull().sum() / len(df)
nan_cols = percent.iloc[np.where(np.array(percent) > 0.1)].index
print(len(nan_cols), "- dropped")
df = df.drop(nan_cols, axis=1)
# Let's take a look at `loan_status`
sns.countplot(y="loan_status", data=df)
# We are only interested in charged-off (`Charged Off`) and fully repaid (`Fully Paid`) loans.
# We set them to 0 and 1 respectively and drop everything else
df = df[(df["loan_status"] == "Fully Paid") | (df["loan_status"] == "Charged Off")]
df["label"] = df.apply(lambda r: 1 if r["loan_status"] == "Fully Paid" else 0, axis=1)
df = df.drop("loan_status", axis=1)
df["label"].head(5)
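# As a quick sanity check before modelling, look at the class balance of the new binary target.
# A minimal sketch, assuming the `label` column created above (1 = Fully Paid, 0 = Charged Off).
print(df["label"].value_counts())
print(df["label"].value_counts(normalize=True).round(3))
sns.countplot(x="label", data=df)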
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# Any results you write to the current directory are saved as output.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

gender_submission = pd.read_csv("../input/titanic/gender_submission.csv")
test = pd.read_csv("../input/titanic/test.csv")
train = pd.read_csv("../input/titanic/train.csv")
train["set"] = "train"
test["set"] = "test"
test.info()
test["Survived"] = np.nan
data = pd.concat([train, test], sort=True)
# finding the missing values
missing = data.isna().sum().sort_values(ascending=False)
percentage = (data.isna().sum() / data.isna().count()).sort_values(ascending=False)
values = pd.concat([missing, percentage], axis=1, keys=("missing", "percentage"))
values
# The Cabin variable has little significance and a lot of NAs, so drop it
del data["Cabin"]
del data["PassengerId"]
# EDA
sns.countplot(data["Survived"])
# Distribution of survival according to Age
sns.boxplot(data["Survived"], data["Age"])
# Distribution of survival according to Age and Gender
sns.boxplot(data["Survived"], data["Age"], hue=data["Sex"])
# Distribution of survival according to Fare
sns.boxplot(data["Survived"], data["Fare"])
# Survival appears more likely for passengers who paid a higher fare
sns.catplot(x="Sex", y="Survived", hue="Pclass", kind="bar", data=data)
g = sns.catplot(
    x="Fare",
    y="Survived",
    row="Pclass",
    kind="box",
    orient="h",
    height=1.5,
    aspect=4,
    data=data.query("Fare > 0"),
)
g.set(xscale="log")
# NA treatment for Age
# Distribution of age according to Pclass
sns.boxplot(data["Pclass"], data["Age"])
# Imputing NA in the Age variable with the median age per Sex and Pclass
data["Age"].fillna(
    data.groupby(["Sex", "Pclass"])["Age"].transform("median"), inplace=True
)
# Check NA in the Age variable
print(data["Age"].isna().sum())
# Imputing NA in the Fare variable with the median fare per Pclass
data["Fare"].fillna(data.groupby(["Pclass"])["Fare"].transform("median"), inplace=True)
print(data["Fare"].isna().sum())
# Distribution of Embarked with Pclass
sns.countplot(data["Embarked"], hue=data["Pclass"])
# Replace NA in Embarked with the most frequent value
data["Embarked"].fillna("S", inplace=True)
data = pd.get_dummies(data, columns=["Embarked"], drop_first=True)
data = pd.get_dummies(data, columns=["Sex"], drop_first=True)
train_set = data[data["set"] == "train"]
del train_set["set"]
train_set.info()
sns.heatmap(train_set.isnull(), yticklabels=False, cbar=False, cmap="viridis")
test_set = data[data["set"] == "test"]
del test_set["set"]
del test_set["Survived"]
del train_set["Name"]
del test_set["Name"]
del train_set["Ticket"]
del test_set["Ticket"]
# Finally, let's look at the correlation of the train set
plt.figure(figsize=(15, 12))
plt.title("Correlation of Features for Train Set")
sns.heatmap(train_set.astype(float).corr(), vmax=1.0, annot=True)
plt.show()
# Model Building
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    train_set.drop("Survived", axis=1),
    train_set["Survived"],
    test_size=0.30,
    random_state=101,
)
logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
from sklearn.metrics import classification_report print(classification_report(y_test, predictions)) test_predictions = logmodel.predict(test_set) submission = pd.DataFrame( {"PassengerId": test["PassengerId"], "Survived": test_predictions} ) # Convert DataFrame to a csv file that can be uploaded # This is saved in the same directory as your notebook filename = "Titanic_Predictions.csv" submission.to_csv(filename, index=False) print("Saved file: " + filename) from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(n_estimators=35, max_depth=5, random_state=1) model_rf = classifier.fit(X_train, y_train) prediction_rf = model_rf.predict(X_test) print(classification_report(y_test, prediction_rf)) sub_predict_rf = model_rf.predict(test_set) submission1 = pd.DataFrame( {"PassengerId": test["PassengerId"], "Survived": sub_predict_rf} ) submission1.to_csv("Titanic_prediction1.csv", index=False)
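# A natural follow-up to the random forest is to look at which features drive its predictions.
# A minimal sketch, assuming the fitted model_rf and the X_train DataFrame from above.
importances = pd.Series(model_rf.feature_importances_, index=X_train.columns)
importances.sort_values(ascending=False).plot(
    kind="bar", figsize=(8, 4), title="Random Forest feature importances"
)
plt.show()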
# ## Current & Forecast weather data collection # ### 1. Overview # In this notebook, I am exploring how to summarize and visualize weather forecast information in a way that's easy and useful for people. For example, given all the weather data we could retrieve, which ones are the most relevant to people's everyday lives? What are some easier ways for people to digest and act upon the information? What visualizations or prompts would be helpful? # ### 2. Data Profile # [OpenWeather API](https://openweathermap.org/api) provides a number of weather data collections, including current weather, historical data and forecast. For example, you can search 16 day weather forecasts with daily average parameters by city name, geographic coordinates or ZIP code. All weather data can be obtained in JSON and XML formats. For a Free account, there’s a limited number of no more than 60 calls per minute. In addition, some of the data collections are available only if you buy paid subscription plans. A Free account may access current weather API, 5 days/3 hour forecast API, weather maps 1.0, UV index and weather alerts. # In this mini project, I am specifically exploring the [5 day weather forecast API](https://openweathermap.org/forecast5). import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # ### 3. Analysis # I am taking Seattle as the example city to explore the data collection throughout this mini project. # First of all, I am accessing the API to retrieve data. Given the city name "Seattle", the API responds with weather forecast information for 5 days in every 3 hours, including temperature, pressure, sea level, humidity and other weather conditions. from kaggle_secrets import UserSecretsClient user_secrets = UserSecretsClient() secret_value_0 = user_secrets.get_secret("openWeather") import requests, json api_key = secret_value_0 base_url = "http://api.openweathermap.org/data/2.5/forecast?" city_name = "Seattle" complete_url = base_url + "q=" + city_name + "&appid=" + api_key response = requests.get(complete_url) results = response.json() results # Next, based on the retrieved data, I would summarize the data based on some of the most important questions people have when checking future weather information. The questions are as follows: # 1. Is it going to be warm or cold for the next 5 days? # 2. Is it going to rain for the next 5 days? # 3. If it's going to be cold, will it snow for the next 5 days? # ### Q1. Is it going to be warm or cold for the next 5 days? # Here's a plot of how the temperature and feels-like temperature (in Calcius degree) for the next 5 days will go (temperature in red and feels-like temperature in blue). Interestingly, overall it always feels colder than it really is. 
from matplotlib.pyplot import plot
import pytemperature

feels_like_temp = []
for i in range(len(results["list"])):
    celsius = pytemperature.k2c(
        results["list"][i]["main"]["feels_like"]
    )  # convert temperature from Kelvin to Celsius
    feels_like_temp.append(celsius)
temp = []
for i in range(len(results["list"])):
    celsius = pytemperature.k2c(
        results["list"][i]["main"]["temp"]
    )  # convert temperature from Kelvin to Celsius
    temp.append(celsius)
plot(feels_like_temp, color="b")
plot(temp, color="r")
# ### Q2. Is it going to rain for the next 5 days?
# This is an everlasting question that every Seattleite asks every single day. Here's a list of the times it is forecast to rain in the next 5 days; the severity of the rain is identified as well.
count = 0
for i in range(len(results["list"])):
    if "Rain" in results["list"][i]["weather"][0]["main"]:
        print(
            "%s at %s"
            % (
                results["list"][i]["weather"][0]["description"],
                results["list"][i]["dt_txt"],
            )
        )
        count = count + 1
if count == 0:
    print("incredible! no rain in the next 5 days in Seattle!")
else:
    print("remember to bring an umbrella and wear your rain boots!")
# ### Q3. If it's going to be cold, will it snow for the next 5 days?
#
count = 0
for i in range(len(results["list"])):
    if "Snow" in results["list"][i]["weather"][0]["main"]:
        print(
            "%s at %s"
            % (
                results["list"][i]["weather"][0]["description"],
                results["list"][i]["dt_txt"],
            )
        )
        count = count + 1
if count == 0:
    print("snow day is not happening for the next 5 days!")
else:
    print("stay warm and build a snowman!")
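# To make the forecast easier to summarize and plot, the 3-hourly entries can be collected into a
# pandas DataFrame. A minimal sketch, assuming the `results` JSON retrieved above; the frame and
# column names are my own choices.
forecast_df = pd.DataFrame(
    {
        "time": [entry["dt_txt"] for entry in results["list"]],
        "temp_c": [pytemperature.k2c(entry["main"]["temp"]) for entry in results["list"]],
        "feels_like_c": [
            pytemperature.k2c(entry["main"]["feels_like"]) for entry in results["list"]
        ],
        "condition": [entry["weather"][0]["main"] for entry in results["list"]],
    }
)
forecast_df.head()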
# # Linear regression (predicting a continuous value):
# * **Question:**
# > Weather in Szeged 2006-2016: Is there a relationship between humidity and temperature? What about between humidity and apparent temperature? Can you predict the apparent temperature given the humidity?
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
import operator

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# Any results you write to the current directory are saved as output.
weatherHistory = pd.read_csv("../input/szeged-weather/weatherHistory.csv")
weatherHistory.head(2)
weatherHistory.info()
weatherHistory.describe().T
# Extract the 3 columns 'Temperature (C)', 'Apparent Temperature (C)' and 'Humidity' for a cleaner view
weatherHistory_df = weatherHistory[
    ["Temperature (C)", "Apparent Temperature (C)", "Humidity"]
]
# Rename the columns
weatherHistory_df.columns = ["Temperature", "Apparent_Temperature", "Humidity"]
weatherHistory_df = weatherHistory_df[:500]  # limit the rows to speed up the regression calculations
weatherHistory_df.head(2)
# Visualize pairwise relationships with scatter plots and regression fits
sns.pairplot(weatherHistory_df, kind="reg")
# see how many null values we have
weatherHistory_df.isnull().sum()
# Feature selection
y = np.array(weatherHistory_df["Humidity"]).reshape(-1, 1)
X = np.array(weatherHistory_df["Apparent_Temperature"]).reshape(-1, 1)
# Only the 'Apparent_Temperature' feature is used here; you can also repeat this with 'Temperature'
# Split the data: 20% test set, 80% train set
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=42
)
# # 1.Linear Regression
from sklearn.linear_model import LinearRegression

lin_df = LinearRegression()
lin_df.fit(X_train, y_train)
y_pred = lin_df.predict(X_test)  # Predict with the linear model
r2_test = lin_df.score(X_test, y_test)  # R^2 score on the test set
print("Linear Regression Model R2 Score: " + "{:.1%}".format(r2_test))
from sklearn.metrics import mean_squared_error, r2_score

print("R2 Score: " + "{:.3}".format(r2_score(y_test, y_pred)))
# Finally draw the figure of the Linear Regression Model
plt.scatter(X_test, y_test, color="r")
plt.plot(X_test, y_pred, color="g")
plt.show()
# # 2.Multiple Linear Regression
mlin_df = LinearRegression()
mlin_df = mlin_df.fit(X_train, y_train)
mlin_df.intercept_  # constant b0
mlin_df.coef_  # feature coefficients
y_pred = mlin_df.predict(X_train)  # predict with the multiple linear regression model
rmse = np.sqrt(mean_squared_error(y_train, mlin_df.predict(X_train)))
print("RMSE Score for Train set: " + "{:.2}".format(rmse))
print("R2 Score for Train set: " + "{:.3}".format(r2_score(y_train, y_pred)))
# this is the training error score
# ## 2.1.Multiple Linear Regression Model Tuning
# cross-validation gives a better and clearer result
cross_val_score(mlin_df, X, y, cv=10, scoring="r2").mean()
mlin_df.score(X_train, y_train)  # r2 value
np.sqrt(
    -cross_val_score(mlin_df, X_train, y_train, cv=10, scoring="neg_mean_squared_error")
).mean()
# Finally draw the figure of the Multiple Linear Regression Model
plt.scatter(X_train, y_train, s=100)
# sort the values of x before the line plot
sort_axis = operator.itemgetter(0)
sorted_zip = sorted(zip(X_train, y_pred), key=sort_axis)
X_line, y_line = zip(*sorted_zip)  # keep the sorted values separate so X_train / X_test stay intact
plt.plot(X_line, y_line, color="r")
plt.show()
# * This was just for the train set; you can also do it for the test set.
# # 3.Polynomial Regression
from sklearn.preprocessing import PolynomialFeatures

poly_df = PolynomialFeatures(degree=5)
transform_poly = poly_df.fit_transform(X_train)
linreg2 = LinearRegression()
linreg2.fit(transform_poly, y_train)
polynomial_predict = linreg2.predict(transform_poly)
rmse = np.sqrt(mean_squared_error(y_train, polynomial_predict))
r2 = r2_score(y_train, polynomial_predict)
print("RMSE Score for Train set: " + "{:.2}".format(rmse))
print("R2 Score for Train set: " + "{:.2}".format(r2))
plt.scatter(X_train, y_train, s=50)
# sort the values of x before the line plot
sort_axis = operator.itemgetter(0)
sorted_zip = sorted(zip(X_train, polynomial_predict), key=sort_axis)
X_line, y_line = zip(*sorted_zip)  # again, don't overwrite X_train with the sorted values
plt.plot(X_line, y_line, color="m")
plt.show()
# * This was just for the train set; you can also do it for the test set.
# # 4.Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor

dt_reg = DecisionTreeRegressor()  # create a DecisionTreeRegressor with sklearn
dt_reg.fit(X_train, y_train)
dt_predict = dt_reg.predict(X_train)
# dt_predict.mean()
plt.scatter(X_train, y_train, color="red")  # scatter draw
X_grid = np.arange(min(np.array(X_train)), max(np.array(X_train)), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.plot(X_grid, dt_reg.predict(X_grid), color="g")  # line draw
plt.xlabel("Apparent_Temperature")
plt.ylabel("Humidity")
plt.title("Decision Tree Model")
plt.show()
rmse = np.sqrt(mean_squared_error(y_train, dt_predict))
r2 = r2_score(y_train, dt_predict)
print("RMSE Score for Train set: " + "{:.2}".format(rmse))
print("R2 Score for Train set: " + "{:.2}".format(r2))
# * This was just for the train set; you can also do it for the test set.
# # 5.Random Forest Model
from sklearn.ensemble import RandomForestRegressor

rf_reg = RandomForestRegressor(n_estimators=5, random_state=0)
rf_reg.fit(X_train, y_train)
rf_predict = rf_reg.predict(X_train)
# rf_predict.mean()
plt.scatter(X_train, y_train, color="red")  # scatter draw
X_grid = np.arange(min(np.array(X_train)), max(np.array(X_train)), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.plot(X_grid, rf_reg.predict(X_grid), color="b")  # line draw
plt.xlabel("Apparent_Temperature")
plt.ylabel("Humidity")
plt.title("Random Forest Model")
plt.show()
rmse = np.sqrt(mean_squared_error(y_train, rf_predict))
r2 = r2_score(y_train, rf_predict)
print("RMSE Score for Train set: " + "{:.2}".format(rmse))
print("R2 Score for Train set: " + "{:.2}".format(r2))
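# The metrics above are computed on the training data. A minimal sketch of scoring the same
# fitted models on the held-out test set (assuming the models and the X_test / y_test split from
# above; the polynomial model would first need poly_df.transform on X_test).
for name, fitted in [
    ("Multiple Linear Regression", mlin_df),
    ("Decision Tree", dt_reg),
    ("Random Forest", rf_reg),
]:
    test_pred = fitted.predict(X_test)
    print(
        name,
        "- Test RMSE: {:.2}".format(np.sqrt(mean_squared_error(y_test, test_pred))),
        "- Test R2: {:.3}".format(r2_score(y_test, test_pred)),
    )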
# **Objective**
# The objective of milestone-1 is to perform data preprocessing and EDA to understand customer churn. The output of milestone-1 will be used in milestones-2 and 3 to build machine learning models to predict customer churn and to create an interactive dashboard for decision making. In order to do that, follow the solution approach given in the capstone project overview and use the learners file for milestone-1 to answer the business questions.
# ## Let us start by importing the required libraries
# Import the libraries to help with reading and manipulating data
import numpy as np
import pandas as pd

# Libraries to help with data visualization
import matplotlib.pyplot as plt
import seaborn as sns

# Removes the limit for the number of displayed columns
pd.set_option("display.max_columns", None)
# Sets the limit for the number of displayed rows
pd.set_option("display.max_rows", 200)
# ## Import the data
# Loading the dataset
df = pd.read_csv("/kaggle/input/milestone1-dataset/milestone1_dataset.csv")
# ## Data Understanding
# ### Write a code to display the first and last 5 rows of the dataset.
# display the first 5 rows of the dataset
df.head()
# display the last 5 rows of the dataset
df.tail()
# ### How many rows and columns are present in the data?
# display the number of rows and columns
df.shape
# The dataset has 6499 rows and 21 columns
# ### What are the datatypes of the different columns in the dataset?
# info: columns, count, non-null and datatype
df.info()
# Write down the observations as per the information provided by running the above code.
# - There are 5 numerical columns and 16 object type columns in the dataset.
# - CustomerID can be changed to string or deleted.
# - Gender and Senior_Citizen are read as integers but they are categories.
# - The other object type columns are categories.
# - 9 observations have a missing value in Total_Charges.
# - All the other columns have no nulls; however, we will check for missing values in the categorical variables further in the analysis.
# ### Correct the datatype of those columns which are not correct. Also, convert all object datatype columns into category datatypes.
# Convert CustomerID to string, and Gender and Senior_Citizen to categories
df = df.astype(
    {"CustomerID": "string", "Gender": "category", "Senior_Citizen": "category"}
)
# Create a list of categorical variables to use further in the analysis.
cat_col = ["Gender", "Senior_Citizen"]
# Display the types
df.dtypes
# Convert all the object columns to categories
for feature in df.columns:  # Loop through all columns in the dataframe
    if df[feature].dtype == "object":  # Only apply to object columns
        df[feature] = df[feature].astype("category")  # Replace object with category
        cat_col.append(
            feature
        )  # add the category column to the list created in the previous step
# display info
df.info()
# ### Check the statistical summary of the data
# The summary statistics of the data
df.describe().T
# Write down the observations after running the above code.
# - The minimum Tenure is 0, which may mean that customers may not have finished their contract or may have canceled it just after subscribing.
# - The monthly charge is between 18 and 118 and the average is 65 USD.
# - The range of total charges is between 18.80 and 88684.80 USD.
# ## Data Preprocessing
# ### Check for duplicate entries in the data
# Check for duplicate entries in the data
df.duplicated().value_counts()
# There are no duplicate observations.
# ### Check if any discrepancy is present in the categorical column values.
# Write the code here for column in cat_col: print(df[column].value_counts()) print("-" * 40) # - The column Internet_Service has 2 variables Fiber-optic and Fiber optic that have the same meaning. so we will correct that. # - No internet service and No phone service are not missing values as it was specifically saying there is no internet service or phone in this area or location # #### Replace the incorrect value with the correct value in the categorical column # Correct the value df.Internet_Service.replace("Fiber-optic", "Fiber optic", inplace=True) # ### Are there any missing values in the data? If so, treat them using an appropriate method. # find the missing values df.isna().sum() # let's visualize observations with missing value in total_charges df.loc[df["Total_Charges"].isna()] # We notice that all this observations have Tenure=0. # Let's also check all observations with tenure=0. df.loc[df["Tenure"] == 0] # The relationship between tenure = 0 and total charge null is one-to-one. Which may means that these customers have not been billed. # so we can replace total_charges with 0. # Replace null by 0 df["Total_Charges"] = df["Total_Charges"].fillna(0) # display the missing values to ensure that there is no missing value df.isna().sum() # #### Let's try to fill in the missing value with the median value of the column. # NB: O seems to be better than median # ##### Now, after handling the missing values and also checking the data types, let's visualise the dataset. # ## Data Visualization # In addition to the observations, perform each of the five relevant univariate and bivariate analyses of different columns with the help of the below functions. # ### Univariate Analysis # Before doing the visualization, please run the below codes: def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to show the density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = 
"{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage plt.show() # show the plot def stacked_barplot(data, predictor, target): """ Print the category counts and plot a stacked bar chart data: dataframe predictor: independent variable target: target variable """ count = data[predictor].nunique() sorter = data[target].value_counts().index[-1] tab1 = pd.crosstab(data[predictor], data[target], margins=True).sort_values( by=sorter, ascending=False ) print(tab1) print("-" * 120) tab = pd.crosstab(data[predictor], data[target], normalize="index").sort_values( by=sorter, ascending=False ) tab.plot(kind="bar", stacked=True, figsize=(count + 5, 5)) plt.legend( loc="lower left", frameon=False, ) plt.legend(loc="upper left", bbox_to_anchor=(1, 1)) plt.show() ### function to plot distributions wrt target def distribution_plot_wrt_target(data, predictor, target): fig, axs = plt.subplots(2, 2, figsize=(12, 10)) target_uniq = data[target].unique() axs[0, 0].set_title("Distribution of target for target=" + str(target_uniq[0])) sns.histplot( data=data[data[target] == target_uniq[0]], x=predictor, kde=True, ax=axs[0, 0], color="teal", stat="density", ) axs[0, 1].set_title("Distribution of target for target=" + str(target_uniq[1])) sns.histplot( data=data[data[target] == target_uniq[1]], x=predictor, kde=True, ax=axs[0, 1], color="orange", stat="density", ) axs[1, 0].set_title("Boxplot w.r.t target") sns.boxplot(data=data, x=target, y=predictor, ax=axs[1, 0], palette="gist_rainbow") axs[1, 1].set_title("Boxplot (without outliers) w.r.t target") sns.boxplot( data=data, x=target, y=predictor, ax=axs[1, 1], showfliers=False, palette="gist_rainbow", ) plt.tight_layout() plt.show() # #### Do the univariate analysis of different integer and categorical variables and write down the observations. # Hint: Plot different visualizations using the above function # #### Tenure : histogram_boxplot(df, "Tenure") # - There are two peaks at both ends of the interval which consist of 30% of the observations. # - The number of tenures with a value 65. # - For the rest the distribution is approximately equal with some slight differences. # - There is no outliers. # #### Monthly charges : histogram_boxplot(df, "Monthly_Charges") # - The distribution is left-skewed. # - There is a peak in the lower bound of the interval, which means that a high number of customers are subscribed to the cheapest services. # - The number of contrats that cost between 75 and 90 is slightly higher. # - There is no outliers. # - The mean and the median are both above 60 $. # #### Total Charges histogram_boxplot(df, "Total_Charges") # - The distribution of total charges is right-skewed. # - Most customers (75 %) have total charges <= 4000. # - There is a peak in the lower bound of the interval, which could be due to tenure and monthly charges. # - There is no outliers. # #### Gender labeled_barplot(df, "Gender") # - Male are more than female. but the difference is not too big. # #### Senior Citizen: labeled_barplot(df, "Senior_Citizen") # There are fewer seniors citizen in the dataset # #### Partner: labeled_barplot(df, "Partner") # There is slight difference between customers with partners and customers without partner. 
# #### Dependents:
labeled_barplot(df, "Dependents")
# Most customers are without dependents.
# #### Phone Service:
labeled_barplot(df, "Phone_Service")
# 90 % of customers have phone service.
# #### Multiple Lines:
labeled_barplot(df, "Multiple_Lines")
# - Nearly 42 % of customers have multiple lines and 48 % don't. The rest (~10 %) don't have phone service.
# #### Internet Service:
labeled_barplot(df, "Internet_Service")
# - Approx 44% of customers have Fiber optic.
# - Approx 34% of customers have DSL.
# - The rest (~22 %) don't have Internet service.
# #### Online Security:
labeled_barplot(df, "Online_Security")
# - Approx 50% don't have online security.
# - Approx 29% of customers have online security.
# - The rest (~21 %) don't have Internet service.
# #### Online Backup:
labeled_barplot(df, "Online_Backup")
# - Approx 43% don't have online backup.
# - Approx 34% of customers have online backup.
# - The rest (~23 %) don't have Internet service.
# #### Device_Protection:
labeled_barplot(df, "Device_Protection")
# - Approx 43% don't have device protection.
# - Approx 34% of customers have device protection.
# - The rest (~23 %) don't have Internet service.
# #### Tech_Support:
labeled_barplot(df, "Tech_Support")
# - Approx 49% don't have tech support.
# - Approx 29% of customers have tech support.
# - The rest (~22 %) don't have Internet service.
# #### Streaming TV:
labeled_barplot(df, "Streaming_TV")
# - Approx 40% don't have Streaming TV.
# - Approx 38% of customers have Streaming TV.
# - The rest (~22 %) don't have Internet service.
# #### Streaming_Movies:
labeled_barplot(df, "Streaming_Movies")
# - Approx 39% don't have Streaming Movies.
# - Approx 38% of customers have Streaming Movies.
# - The rest don't have Internet service.
# #### Contract:
labeled_barplot(df, "Contract")
# - Approx 55% have a month-to-month contract.
# - Approx 24% of customers have a 2-year contract.
# - Approx 20% of customers have a 1-year contract.
# #### Paperless_Billing
labeled_barplot(df, "Paperless_Billing")
# - The majority of customers (60 %) use paperless billing.
# #### Payment Method:
labeled_barplot(df, "Payment_Method")
# - Electronic check is the payment method most used by customers, followed by mailed check.
# - There is no big difference between bank transfer and credit card.
# #### Churn:
labeled_barplot(df, "Churn")
# - We have 73% observations for non-churn and 27% observations for churn.
# - This is our target variable. We will analyze which factors contribute to this result with machine learning.
# ### Bivariate Analysis
# #### Plot the heatmap.
# Visualization with heatmap
plt.figure(figsize=(15, 7))
sns.heatmap(df.corr(), annot=True, vmin=-1, vmax=1, fmt=".2f", cmap="Spectral")
plt.show()
# Observations
# - There is a strong correlation between total charges and tenure.
# - There is a positive correlation between monthly charges and total charges. It can be considered moderate to strong.
# - There is a weak correlation between monthly charges and tenure.
# #### Do the bivariate analysis between the column "Churn" and the different integer and categorical variables and write down the observations.
# Hint: Plot different visualizations using the above functions
# #### Monthly Charges and Churn:
distribution_plot_wrt_target(df, "Monthly_Charges", "Churn")
# - We can see that the density of monthly charges is higher when churn = 'yes'.
# - we can also see that The first 3 quartile of the distribution of churn = 'yes' is higher compared to churn = 'no'. # - This shows that high monthly charges are more likely to churn. # #### Total charges and churn: distribution_plot_wrt_target(df, "Total_Charges", "Churn") # - we can see that the distribution of density of total charges is lower when churn = 'yes'. # - we can also see that The distribution of churn = 'yes' is lower compared to churn = 'no'. # - This shows that observations with churn = 'yes' have mostly less total charges, wich is obvious, churn = yes indicates loss of profit. # - There is some outliers for churn='yes'. # #### Tenure and churn: distribution_plot_wrt_target(df, "Tenure", "Churn") # - we can see that the distribution of density of Tenure is higher when churn = 'no'. # - we can see that both Churn = 'yes' and 'no' have peak in tenure between 0 and 5. # - we can see that there is a peak when churn = 'no' for tenure over 65. # - we can also see that The first 3 quartile of the distribution of churn = 'yes' is lower compared to churn = 'no'. # - This shows that observations with churn = 'yes' have mostly lower Tenure, wich is obvious. # - There is some outliers for churn='yes'. # #### Churn Vs Gender stacked_barplot(df, "Gender", "Churn") # There is no difference between the percentage of customers who has Churn ='yes' in both genders males and females. # #### Chrun Vs Senior citizen: stacked_barplot(df, "Senior_Citizen", "Churn") # - Senior citizens have a bigger pourcentage of churn = 'yes'. # #### Churn Vs Partner stacked_barplot(df, "Partner", "Churn") # - Customers without partners have a bigger pourcentage of churn = 'yes'. # #### Churn Vs Dependant stacked_barplot(df, "Dependents", "Churn") # - Customers without dependents have a bigger pourcentage of churn = 'yes'. # #### Churn Vs Phone service: stacked_barplot(df, "Phone_Service", "Churn") # - There is no difference in churn between customers who have phone service and those who don't. # #### Churn Vs Multiple_Lines stacked_barplot(df, "Multiple_Lines", "Churn") # - The churn rate of customers who have multiple lines is slightly higher than the churn rate of customers who don't have one or those who do not have telephone service. # - There is a no difference in churn between customers who don't have phone service and those who don't have multiples lines. # #### Chrun Vs Internet Service: stacked_barplot(df, "Internet_Service", "Churn") # - The churn rate of customers who have fiber optic is higher than the churn rate of customers who have DSL and those who do not have any Internet service. # - The rate of customers who don't have internet service is the lowest. # ### Chrun Vs Online security: stacked_barplot(df, "Online_Security", "Churn") # - The churn rate of customers who don't have online security is higher than the churn rate of customers who have it and those who don't have any Internet service. # - The chrun rate of customers who don't have internet service is the lowest. # ### Chrun Vs Online Backup: stacked_barplot(df, "Online_Backup", "Churn") # - The churn rate of customers who don't have online backup is higher than the churn rate of customers who have it and those who don't have any Internet service. # - The chrun rate of customers who don't have internet service is the lowest. 
# ### Churn Vs Device_Protection:
stacked_barplot(df, "Device_Protection", "Churn")
# - The churn rate of customers who don't have device protection is higher than the churn rate of customers who have it and those who don't have any Internet service.
# - The churn rate of customers who don't have Internet service is the lowest.
# ### Churn Vs Tech support:
stacked_barplot(df, "Tech_Support", "Churn")
# - The churn rate of customers who don't have tech support is higher than the churn rate of customers who have it and those who don't have any Internet service.
# - The churn rate of customers who don't have Internet service is the lowest.
# ### Churn Vs Streaming TV:
stacked_barplot(df, "Streaming_TV", "Churn")
# - There is no (considerable) difference between the churn rate of customers who have Streaming TV and those who don't.
# - The churn rate of customers who don't have Internet service is the lowest.
# ### Churn Vs Streaming Movies:
stacked_barplot(df, "Streaming_Movies", "Churn")
# - There is no (considerable) difference between the churn rate of customers who have Streaming Movies and those who don't.
# - The churn rate of customers who don't have Internet service is the lowest.
# ### Churn Vs Contract:
stacked_barplot(df, "Contract", "Churn")
# - The month-to-month contract has the highest churn rate, followed by the one-year contract and then the two-year contract.
# - The longer the contract, the lower the risk that the customer churns.
# #### Churn Vs Paperless_Billing:
stacked_barplot(df, "Paperless_Billing", "Churn")
# - The churn rate of customers who choose paperless billing is the highest.
# #### Churn Vs Payment method:
stacked_barplot(df, "Payment_Method", "Churn")
# - The churn rate of customers who pay with electronic check is the highest, followed by mailed check.
# - There is no difference between the churn rates for credit card and bank transfer.
# ## Answering business questions
# ##### [Q1] - Display a table to show the relationship between contracts and payment method and write down the observations.
dfgp = df.groupby(["Contract", "Payment_Method"], as_index=False)["CustomerID"].count()
pd.pivot_table(
    dfgp, index=dfgp["Contract"], columns=dfgp["Payment_Method"], values=["CustomerID"]
)["CustomerID"]
# - The most used payment method in month-to-month contracts is the electronic check, followed by the mailed check.
# - There is no big difference in payment methods for one-year contracts, but credit cards and bank transfers are slightly higher than checks.
# - Credit cards and bank transfers are the most used in two-year contracts, mailed checks are moderately used, and electronic checks are the least used.
# ##### [Q2] - Display a table to show the relationship between payment and internet service and write down the observations.
dfgp = df.groupby(["Internet_Service", "Payment_Method"], as_index=False)[
    "CustomerID"
].count()
pd.pivot_table(
    dfgp,
    index=dfgp["Internet_Service"],
    columns=dfgp["Payment_Method"],
    values=["CustomerID"],
)["CustomerID"]
# - There is no big difference in payment methods for DSL, but credit cards and bank transfers are slightly lower than checks.
# - The payment method most used by customers with fiber optic is the electronic check, followed by bank transfer and credit card.
# - Customers who don't have Internet service mainly use mailed checks.
# ##### [Q3] - Display a table to show the relationship between contracts and internet service and write down the observations.
# dfgp = df.groupby(["Contract", "Internet_Service"], as_index=False)[ "CustomerID" ].count() pd.pivot_table( dfgp, index=dfgp["Contract"], columns=dfgp["Internet_Service"], values=["CustomerID"], )["CustomerID"] # - Fiber optic customers mostly use the Month-to-month contract type, followed by one year and then two year. # - DSL customers mostly use the Month-to-month contract type, followed by two year and then one year. # - No internet service users mostly use the Two year contrat type, followed by Month to month and then one year # ##### [Q4] - Are there any outliers in the numerical columns # Hint: Write a code and check the percentile values of different columns # select numerical variables df_num = df[["Tenure", "Monthly_Charges", "Total_Charges"]] # calculate percentile q1 = df_num.quantile(0.25) q3 = df_num.quantile(0.75) IQR = q3 - q1 # select observations that are above q3+1.5*IQR and less than q1-1.5*IQR # dropna to leave only True values outliers = df_num[((df_num < (q1 - 1.5 * IQR)) | (df_num > (q3 + 1.5 * IQR)))].dropna() print("number of outliers: " + str(outliers.shape[0])) # Observations # - There is no outliers in the dataset. # ##### [Q5] - What is the percentage of customers who cancelled their subscription? # Calculate prctg using shape print( "The percentage of customers who cancelled their subscription is : {:.2f}%".format( round((df.loc[df["Churn"] == "Yes"].shape[0] / df.shape[0]) * 100, 2) ) ) # ##### [Q6] - What is the average monthly charge for different types of internet services? print("The average monthly charge for each types of internet service is :") # Calculate the average using groupby and mean round(df.groupby("Internet_Service")["Monthly_Charges"].mean(), 2).to_frame() # Monthly charges of Fiber optic is the highest. # ##### [Q7] - What is the total revenue for different Internet Services? print("The total revenue for different Internet Services is :") # Calculate the total revenue using groupby and sum df.groupby("Internet_Service")["Total_Charges"].sum().to_frame() # Despite the churn, optical fiber has the largest amount. For this purpose fiber is the most important source of revenue. # ##### [Q8] - What is the average tenure for different contracts? print("The average tenure for different contracts is :") # Calculate the average using groupby and mean round(df.groupby("Contract")["Tenure"].mean(), 2).to_frame() # The longer the contract, the longer the customer's average tenure time. # ##### [Q9] - What percentage of customers have online protection? # #### Online_Security: # Calculate prctg using shape print( "The percentage of customers who have online Security is : {:.2f}%".format( round((df.loc[df["Online_Security"] == "Yes"].shape[0] / df.shape[0]) * 100, 2) ) ) # #### Device_Protection: # Calculate prctg using shape print( "The percentage of customers who have Device Protection is : {:.2f}%".format( round( (df.loc[df["Device_Protection"] == "Yes"].shape[0] / df.shape[0]) * 100, 2 ) ) ) # ##### [Q10] - Write a code to replace 'Yes' with 1 and 'No' with 0 in the Churn column. df["Churn"].replace(("Yes", "No"), (1, 0), inplace=True) # ## Summary: # - High monthly charges are more likely to churn, the company may revise pricing. # - Seniors Citizens, customers without partners and customers without dependents have a higher percentage of churn = 'yes', to avoid this the company could make offers adapted to these customers. 
# - Customers who don't have these products are more likely to churn:
#     - Online backup
#     - Online security
#     - Device protection
#     - Tech support.
# - However, the number of customers with these products remains modest. Thus, the company could make more of an effort to market these products.
# - The company may encourage customers to use long-term contracts.
# - Check whether there are issues to resolve with paperless billing and with check payments, since churn is higher for those.
# - The churn rate for customers with fiber optic is the highest. Fiber optic observations also tend to have high monthly charges and month-to-month contracts - mostly paid by check - which we have identified as likely causes of churn.
# Since fiber optic is the largest source of revenue, the company should investigate the real cause of the problems with this service and improve it.
# ### Now our EDA part is done. Export the data so it can be used in Milestone 2 for building machine learning models.
### Export the dataset to be used for the next milestone
df.to_csv("A2Z_milestone_2_3_input.csv", index=False)
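# Before the modelling milestone, the remaining categorical columns will need a numeric encoding.
# A minimal sketch using one-hot encoding; leaving out CustomerID (an identifier) and Churn (the
# target, already 0/1), and using drop_first, are my own choices here, not part of the milestone.
features = pd.get_dummies(df.drop(columns=["CustomerID", "Churn"]), drop_first=True)
target = df["Churn"].astype(int)
print(features.shape, target.shape)
features.head()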
# ###### Notebook created by: Arnav Chavan (@[carnav0400](https://www.kaggle.com/carnav0400)), Udbhav Bamba (@[ubamba98](https://www.kaggle.com/ubamba98))
# ## NOTE: Turn on the Internet and GPU for this kernel before starting
# # How to add the dataset to the kernel
# * Click on "Add Data"
# * Search "CLabsCVcomp"
# * Click on "Add"
# * Done
# Ensembling my submissions with the highest accuracy.
# All my submissions are at https://www.kaggle.com/sakshamaggarwal/submissions-csv
import pandas as pd
import numpy as np

data0 = pd.read_csv("../input/submissions-csv/submission0.csv")
data1 = pd.read_csv("../input/submissions-csv/submission1.csv")
data2 = pd.read_csv("../input/submissions-csv/submission2.csv")
data3 = pd.read_csv("../input/submissions-csv/submission3.csv")
data4 = pd.read_csv("../input/submissions-csv/submission4.csv")
data5 = pd.read_csv("../input/submissions-csv/submission5.csv")
data6 = pd.read_csv("../input/submissions-csv/submission6.csv")
data7 = pd.read_csv("../input/submissions-csv/submission7.csv")
data8 = pd.read_csv("../input/submissions-csv/submission8.csv")
data9 = pd.read_csv("../input/submissions-csv/submission9.csv")
data10 = pd.read_csv("../input/submissions-csv/submission10.csv")
data11 = pd.read_csv("../input/submissions-csv/submission11.csv")
data12 = pd.read_csv("../input/submissions-csv/submission12.csv")
data13 = pd.read_csv("../input/submissions-csv/submission13.csv")
data14 = pd.read_csv("../input/submissions-csv/submission14.csv")
data15 = pd.read_csv("../input/submissions-csv/submission15.csv")
data16 = pd.read_csv("../input/submissions-csv/submission16.csv")
from collections import Counter

a = {}
for i in range(0, 29996):
    # majority vote over the best-scoring submissions for each row
    data = Counter(
        [
            data4.genres[i],
            data0.genres[i],
            data7.genres[i],
            data11.genres[i],
            data16.genres[i],
            data9.genres[i],
            data5.genres[i],
        ]
    )
    a[i] = data.most_common(1)[0][0]
# collect the voted label for each row
voted = [v for k, v in a.items()]
voted
dataarray = np.asarray(voted)
dataarray = dataarray.reshape(-1, 1)
ids = np.asarray(data0.id)
ids = ids.reshape(-1, 1)
g = np.concatenate([ids, dataarray], axis=1)
df = pd.DataFrame(g, columns=["id", "genres"])
df.to_csv("final-submissions1.csv", index=False)
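# The same majority vote can also be written as a row-wise mode over the submissions. A minimal
# sketch, assuming the dataN frames loaded above; note that it votes over all 17 submissions
# rather than the hand-picked subset used in the loop, and the output file name is my own choice.
frames = [data0, data1, data2, data3, data4, data5, data6, data7, data8,
          data9, data10, data11, data12, data13, data14, data15, data16]
all_preds = pd.concat([d["genres"] for d in frames], axis=1)
# mode(axis=1) returns the most frequent prediction per row; ties resolve to the first value
majority = all_preds.mode(axis=1)[0]
ensemble_df = pd.DataFrame({"id": data0.id, "genres": majority})
ensemble_df.to_csv("final-submissions-mode.csv", index=False)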
# # Note: This is a work in progress notebook! # - Lot can be done in the feature engineering. # - Also passing Spark dataframe to Tensorflow without converting it to pandas. import tensorflow as tf from tensorflow.keras import Sequential, Model from pyspark.sql import SparkSession from pyspark.sql import functions as f import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) tf.__version__ spark = SparkSession.builder.getOrCreate() spark # # Read Inputs sdf_shops = spark.read.csv( "/kaggle/input/competitive-data-science-predict-future-sales/shops.csv", inferSchema=True, header=True, ) col_shops = ["shop_name", "shop_id"] sdf_item_categories = spark.read.csv( "/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv", inferSchema=True, header=True, ) col_item_categories = ["item_category_name", "item_category_id"] sdf_sales_train = spark.read.csv( "/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv", inferSchema=True, header=True, ) col_sales_train = ["date", "date_block_num", "shop_id", "item_id", "item_cnt_day"] sdf_items = spark.read.csv( "/kaggle/input/competitive-data-science-predict-future-sales/items.csv", inferSchema=True, header=True, ) col_items = ["item_name", "item_id", "item_category_id"] sdf_test = spark.read.csv( "/kaggle/input/competitive-data-science-predict-future-sales/test.csv", inferSchema=True, header=True, ) col_test = ["ID", "shop_id", "item_id"] sdf_sample_submission = spark.read.csv( "/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv", inferSchema=True, header=True, ) col_sample_submission = ["ID", "item_cnt_month"] # sdf_sample_submission.limit(5).toPandas().T # sdf_sales_train.withColumn('date', f.from_unixtime(f.unix_timestamp(sdf_sales_train['date'],'%d.%m.%Y'))) # # sdf_sales_train.limit(10).toPandas().T # sales_data = sdf_sales_train.toPandas() # sales_data.dtypes # # Negative Values ( Returns ) exclude or predict ? # change all of them to 0! 
from pyspark.sql import functions as f sdf_sales_train = sdf_sales_train.withColumn( "item_cnt_day", f.when(sdf_sales_train["item_cnt_day"] < 0, 0).otherwise( sdf_sales_train["item_cnt_day"] ), ) sdf_sales_train = sdf_sales_train.withColumn( "item_price", f.when(sdf_sales_train["item_price"] < 0, 0).otherwise( sdf_sales_train["item_price"] ), ) sdf_sales_train.where( (sdf_sales_train["item_cnt_day"] < 0) | (sdf_sales_train["item_price"] < 0) ).count() # # Type casting / data cleaning sales_data = sdf_sales_train.toPandas() sales_data["date"] = pd.to_datetime(sales_data["date"], format="%d.%m.%Y") sales_data.dtypes # sales_data.T dataset = sales_data.pivot_table( index=["shop_id", "item_id"], values=["item_cnt_day"], columns=["date_block_num"], fill_value=0, aggfunc="sum", ) dataset.reset_index(inplace=True) dataset.head() test_data = sdf_test.toPandas() dataset = pd.merge(test_data, dataset, on=["item_id", "shop_id"], how="left") dataset.fillna(0, inplace=True) dataset.head() dataset.drop(["shop_id", "item_id", "ID"], inplace=True, axis=1) dataset.head() # # Train Test Split # X we will keep all columns execpt the last one X_train = np.expand_dims(dataset.values[:, :-1], axis=2) # the last column is our label y_train = dataset.values[:, -1:] # for test we keep all the columns execpt the first one X_test = np.expand_dims(dataset.values[:, 1:], axis=2) # lets have a look on the shape print(X_train.shape, y_train.shape, X_test.shape) # # TF Model building model = tf.keras.models.Sequential() model.add(tf.keras.layers.LSTM(32, input_shape=X_train.shape[-2:])) model.add(tf.keras.layers.Dropout(0.4)) model.add(tf.keras.layers.Dense(1, activation="relu")) model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss="mae") # model.compile(loss = 'mse',optimizer = 'adam', metrics = ['mean_squared_error']) model.summary() tf.keras.utils.plot_model(model, show_layer_names=True, show_shapes=True) # # Train the model model.fit(X_train, y_train, batch_size=4096, epochs=10) # # Use trained model for Prediction # creating submission file submission_pfs = model.predict(X_test) # creating dataframe with required columns submission = pd.DataFrame( {"ID": test_data["ID"], "item_cnt_month": submission_pfs.ravel()} ) # creating csv file from dataframe submission.T submission.to_csv("submission.csv", index=False)
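# A minimal sketch of an alternative training call to the one above that holds out part of the
# data and stops early to monitor overfitting; the validation fraction and patience are arbitrary
# choices, not values prescribed by the competition.
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=2, restore_best_weights=True
)
history = model.fit(
    X_train,
    y_train,
    batch_size=4096,
    epochs=10,
    validation_split=0.1,  # hold out 10% of the series for validation
    callbacks=[early_stop],
)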