Schema of this dump (field name, type, observed min-max of string length or value):
file_id: string, 5-9 chars
content: string, 100-5.25M chars
local_path: string, 66-70 chars
kaggle_dataset_name: string, 3-50 chars
kaggle_dataset_owner: string, 3-20 chars
kversion: string, 497-763 chars
kversion_datasetsources: string, 71-5.46k chars
dataset_versions: string, 338-235k chars
datasets: string, 334-371 chars
users: string, 111-264 chars
script: string, 100-5.25M chars
df_info: string, 0-4.87M chars
has_data_info: bool, 2 classes
nb_filenames: int64, 0-370
retreived_data_description: string, 0-4.44M chars
script_nb_tokens: int64, 25-663k
upvotes: int64, 0-1.65k
tokens_description: int64, 25-663k
tokens_script: int64, 25-663k
129788876
<jupyter_start><jupyter_text>ImageNet 1000 (mini) ### Context https://github.com/pytorch/examples/tree/master/imagenet Kaggle dataset identifier: imagenetmini-1000 <jupyter_script># Import dependencies import math, re, os import tensorflow as tf import numpy as np import pandas as pd import matplotlib.pyplot as plt import skimage import skimage.io from sklearn.model_selection import train_test_split from keras.utils import load_img, img_to_array, array_to_img from keras.preprocessing.image import ImageDataGenerator from tqdm import tqdm from kaggle_datasets import KaggleDatasets from tensorflow import keras from functools import partial print("Tensorflow version " + tf.__version__) import random from glob import glob from tensorflow.keras.optimizers import Adam import keras from keras.models import * from keras import layers from tensorflow.keras.callbacks import EarlyStopping from keras.applications.vgg16 import preprocess_input from keras.applications.vgg16 import VGG16 from IPython.display import display from PIL import Image mapping_path = ( "/kaggle/input/imagenet-object-localization-challenge/LOC_synset_mapping.txt" ) src_path_train = ( "/kaggle/input/imagenet-object-localization-challenge/ILSVRC/Data/CLS-LOC/train" ) src_path_test = ( "/kaggle/input/imagenet-object-localization-challenge/ILSVRC/Data/CLS-LOC/test" ) # Creation of mapping dictionaries to obtain the image classes class_mapping_dict = {} class_mapping_dict_number = {} mapping_class_to_number = {} mapping_number_to_class = {} i = 0 for line in open(mapping_path): class_mapping_dict[line[:9].strip()] = line[9:].strip() class_mapping_dict_number[i] = line[9:].strip() mapping_class_to_number[line[:9].strip()] = i mapping_number_to_class[i] = line[:9].strip() i += 1 # print(class_mapping_dict) # print(class_mapping_dict_number) # print(mapping_class_to_number) # print(mapping_number_to_class) # Creation of images_array and CLASSES (loaded for quick inspection; the generators below read from disk instead) CLASSES = [] images_array = [] for train_class in tqdm(os.listdir(src_path_train)): i = 0 for el in os.listdir(src_path_train + "/" + train_class): if i < 10: path = src_path_train + "/" + train_class + "/" + el image = load_img(path, target_size=(224, 224, 3)) image_array = img_to_array(image).astype(np.uint8) images_array.append(image_array) CLASS = class_mapping_dict[path.split("/")[-2]] CLASSES.append(CLASS) i += 1 else: break images_array = np.array(images_array) CLASSES = np.array(CLASSES) batch_size = 128 epochs = 100 # Creation of the train_generator and the test_generator image_gen = ImageDataGenerator( # rescale=1 / 255.0, # rotation_range=20, # zoom_range=0.05, # width_shift_range=0.05, # height_shift_range=0.05, # shear_range=0.05, # horizontal_flip=True, # fill_mode="nearest", preprocessing_function=preprocess_input, validation_split=0.20, ) train_generator = image_gen.flow_from_directory( src_path_train, target_size=(224, 224), shuffle=True, batch_size=batch_size, subset="training", class_mode="sparse", ) test_generator = image_gen.flow_from_directory( src_path_train, target_size=(224, 224), shuffle=True, batch_size=batch_size, subset="validation", class_mode="sparse", ) lr_scheduler = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=1e-5, decay_steps=10000, decay_rate=0.9 ) model_VGG16 = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3)) for layer in model_VGG16.layers: layer.trainable = False model = Sequential() model.add(model_VGG16) # model.add(layers.BatchNormalization(renorm=True)) 
model.add(layers.Flatten()) model.add(layers.Dense(units=4096, activation="relu")) # model.add(layers.Dropout(0.3)) # model.add(layers.BatchNormalization(renorm=True)) model.add(layers.Dense(units=4096, activation="relu")) # model.add(layers.Dropout(0.5)) # model.add(layers.BatchNormalization(renorm=True)) model.add(layers.Dense(units=1000, activation="softmax")) model.compile( optimizer=Adam(learning_rate=lr_scheduler, epsilon=0.001), loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], ) model.summary() early_stop = EarlyStopping( min_delta=0.001, # minimum amount of change to count as an improvement patience=10, # how many epochs to wait before stopping ) history = model.fit( train_generator, validation_data=test_generator, epochs=epochs, steps_per_epoch=len(train_generator), # len(generator) is already the number of batches per epoch validation_steps=len(test_generator), callbacks=[early_stop], ) # create learning curves to evaluate model performance history_frame = pd.DataFrame(history.history) history_frame.loc[:, ["loss", "val_loss"]].plot() history_frame.loc[ :, ["sparse_categorical_accuracy", "val_sparse_categorical_accuracy"] ].plot()
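A short aside on the generator arithmetic used in model.fit above — a minimal sketch, assuming a hypothetical local `images/` directory laid out with one subfolder per class and a standard TensorFlow/Keras install. It illustrates that `len(generator)` already equals the number of batches per epoch, which is why it can be passed to `steps_per_epoch` without further division:

import math
from keras.preprocessing.image import ImageDataGenerator

gen = ImageDataGenerator(validation_split=0.2)
train = gen.flow_from_directory(
    "images",            # hypothetical path, one subfolder per class
    target_size=(224, 224),
    batch_size=128,
    subset="training",
    class_mode="sparse",
)
# DirectoryIterator exposes .samples and .batch_size; its length is the
# number of batches per epoch, so no extra division by batch_size is needed.
assert len(train) == math.ceil(train.samples / train.batch_size)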
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/788/129788876.ipynb
imagenetmini-1000
ifigotin
[{"Id": 129788876, "ScriptId": 38358786, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13877157, "CreationDate": "05/16/2023 13:25:14", "VersionNumber": 1.0, "Title": "CNN_on_ImageNet_GPU", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 164.0, "LinesInsertedFromPrevious": 164.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186155496, "KernelVersionId": 129788876, "SourceDatasetVersionId": 998277}]
[{"Id": 998277, "DatasetId": 547506, "DatasourceVersionId": 1026923, "CreatorUserId": 2424380, "LicenseName": "Unknown", "CreationDate": "03/10/2020 01:05:11", "VersionNumber": 1.0, "Title": "ImageNet 1000 (mini)", "Slug": "imagenetmini-1000", "Subtitle": "1000 samples from ImageNet", "Description": "### Context\n\nhttps://github.com/pytorch/examples/tree/master/imagenet\n\n### Acknowledgements\n\nhttps://github.com/pytorch/examples/tree/master/imagenet", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 547506, "CreatorUserId": 2424380, "OwnerUserId": 2424380.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 998277.0, "CurrentDatasourceVersionId": 1026923.0, "ForumId": 561077, "Type": 2, "CreationDate": "03/10/2020 01:05:11", "LastActivityDate": "03/10/2020", "TotalViews": 62479, "TotalDownloads": 11891, "TotalVotes": 134, "TotalKernels": 57}]
[{"Id": 2424380, "UserName": "ifigotin", "DisplayName": "Ilya Figotin", "RegisterDate": "10/29/2018", "PerformanceTier": 1}]
false
0
1,512
0
1,564
1,512
129788516
<jupyter_start><jupyter_text>ISMI_Group3_PANDA_36_256_256_res1_tiles This dataset provides a preprocessed version of the [PANDA](https://www.kaggle.com/competitions/prostate-cancer-grade-assessment) challenge. Each sample has 36 tiles of 256 x 256 pixels. The tiles are taken from the medium resolution. Kaggle dataset identifier: ismi-group3-panda-36-256-256-res1-tiles <jupyter_script>import torch import os import gc from PIL import Image import torchvision from torch.utils.data import DataLoader import torchvision.transforms as transforms import pytorch_lightning as pl import torch.nn.functional as F import numpy as np import json import requests import matplotlib.pyplot as plt import warnings import glob import pandas as pd import tqdm import random warnings.filterwarnings("ignore") # device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") # print(f'Using {device} for inference') import os import sys import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib import matplotlib.pyplot as plt import PIL from IPython.display import Image, display import openslide # import skimage.io # import tifffile from tqdm.notebook import tqdm import zipfile import cv2 as cv import timm # device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") # print(f'Using {device} for inference') with open("/kaggle/working/submission.csv", "w") as submis: pass # Location of the files # data_dir = '/kaggle/input/prostate-cancer-grade-assessment/test_images' # train_data_dir = '/kaggle/input/prostate-cancer-grade-assessment/test_images' # mask_dir = '/kaggle/input/prostate-cancer-grade-assessment/test_label_masks' # Image ids for the split being scored (this reads test.csv, despite the train_labels name) train_labels = pd.read_csv( "/kaggle/input/prostate-cancer-grade-assessment/test.csv" ).set_index("image_id") file_names = train_labels.index.tolist() # This function takes an openslide object and returns the top left coordinates of the N tiles (of a given size) with the most tissue pixels. # Note: slide.level_dimensions[level] = (width,height). # Note: padding is done to the right and bottom; this keeps it simple while having at most 1 tile in memory at a time. def get_tile_locations_from_slide(slide, tile_size, N, level): tiles = [] required_padding = False xlocs, ylocs = np.arange(0, slide.level_dimensions[level][0], tile_size), np.arange( 0, slide.level_dimensions[level][1], tile_size ) # Get the coordinates of the top left corners of the tiles. for x_i, xloc in enumerate(xlocs): for y_i, yloc in enumerate(ylocs): region = np.copy( slide.read_region( (xloc * (4**level), yloc * (4**level)), level, (tile_size, tile_size), ) ) # The position is wrt. level 0, so must convert to level 0 coordinates by multiplying by the downsampling factor. 
region_arr = np.asarray(region)[:, :, :3] # Ignore the alpha channel if ( xloc + tile_size > slide.level_dimensions[level][0] or yloc + tile_size > slide.level_dimensions[level][1] ): # if the tile goes out of bounds region_arr[region_arr == 0] = 255 required_padding = True pixel_sum = region_arr.sum() tiles.append( { "xloc": xloc, "yloc": yloc, "pixel_sum": pixel_sum, "required_padding": required_padding, } ) # store top left corner location and the tile's pixel_sum required_padding = False sorted_tiles = sorted( tiles, key=lambda d: d["pixel_sum"] ) # Sort tiles by pixel_sum, ascending: lower sums mean darker, tissue-rich tiles on the white background sorted_tiles = sorted_tiles[:N] # Get top N tiles return sorted_tiles # Creates a single image (array) from the selected tiles def create_tiled_image(slide, tiles, tile_size, N_tiles, level): N_side = int(np.sqrt(N_tiles)) # How many tiles the image is wide/tall tiled_image = ( np.ones((N_side * tile_size, N_side * tile_size, 3), dtype=np.uint8) * 255 ) for i, tile in enumerate(tiles): region = np.copy( np.asarray( slide.read_region( (tile["xloc"] * (4**level), tile["yloc"] * (4**level)), level, (tile_size, tile_size), ) ) ) # The position is wrt. level 0, so must convert to level 0 coordinates by multiplying by the downsampling factor. if tile["required_padding"]: region[region == 0] = 255 tiled_image[ tile_size * (i // (N_side)) : tile_size * (i // (N_side)) + tile_size, tile_size * (i % (N_side)) : tile_size * (i % (N_side)) + tile_size, :, ] = region[:, :, :3] return tiled_image # # Creates a single image (array) from the selected tiles # def save_tiles(slide, tiles, tile_size, N_tiles, out_path, level=1): # for n, tile in enumerate(save): # region = np.asarray(slide.read_region((tile['xloc']*(4**level),tile['yloc']*(4**level)), level, (tile_size,tile_size))) # The position is wrt. level 0, so must convert to level 0 coordinates by multiplying by the downsampling factor. # if tile['required_padding']: # region[region==0] = 255 # img = PIL.Image.fromarray(img) # img.save(os.path.join(folder, filename+"_tiled.png")) # return tiled_image # ### **Now we can load the model and write the csv file!** ########## THIS IS FOR WHOLE IMAGES ################ # Defining the tile Data Module TODO: use the imghash to make sure patients aren't in both the test and train sets # THE PARAMETERS 🔥 N_tiles = 6**2 # Number of tiles per image; should be a perfect square tile_size = 2**8 # Width/height of tile, 2**8 = 256 level = 1 # 0 is highest resolution, 2 is lowest resolution, good compromise is level 1 # MAX_EPOCHS = 1000 class PANDADataset(torch.utils.data.Dataset): def __init__(self, dataset: str = "train"): # assert dataset in ['train', 'test'], "dataset should be one of \"train\" or \"test\"" super().__init__() self.df = pd.read_csv( f"/kaggle/input/prostate-cancer-grade-assessment/{dataset}.csv" ) self.imgdir = f"/kaggle/input/prostate-cancer-grade-assessment/{dataset}_images" assert len(self.df) == len(self.df["image_id"].unique()) self.num_classes = 6 def convert_to_ordinal(self, n: int, nclasses: int): ordinal = torch.zeros(nclasses) ordinal[0 : n + 1] = 1 return ordinal # For the independent tiles? 
def load_tiles(self, samplepath): tiles = glob.glob(os.path.join(samplepath, "tile_*.png")) tiles = [torchvision.io.read_image(tile) / 255 for tile in tiles] tiles = torch.stack(tiles) return tiles # Get a tiled image def __getitem__(self, idx): row = self.df.iloc[idx] file_name = row.loc["image_id"] slide = openslide.OpenSlide(os.path.join(self.imgdir, file_name + ".tiff")) tiles = get_tile_locations_from_slide( slide, tile_size, N_tiles, level ) # Get tile coordinates of top N tiles tiled_image = create_tiled_image( slide, tiles, tile_size, N_tiles, level ) # Convert the tiles information into a tiled image tiled_image = torch.tensor(tiled_image.transpose(2, 0, 1)) # HWC -> CHW, as the CNN expects return tiled_image def __len__(self): return len(self.df) # print(f"{torch.cuda.memory_allocated()*1e-9:.4f}, GiB") test_data = PANDADataset() BATCH_SIZE = 1 test_dataloader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=False) class efficientnetModule(pl.LightningModule): def __init__(self): super().__init__() self.model = timm.create_model( "tf_efficientnet_b0", checkpoint_path="/kaggle/input/tf-efficientnet/pytorch/tf-efficientnet-b0/1/tf_efficientnet_b0_aa-827b6e33.pth", ) self.model.classifier = torch.nn.Linear( in_features=self.model.classifier.in_features, out_features=test_data.num_classes, bias=True, ) def get_prediction(self, output): # this changes a prediction of the form (0.01, 0.9, 0.8) to (1., 1., 0.) for i, prediction in enumerate(output): maxi = torch.argmax(prediction) prediction[0 : maxi + 1] = 1 prediction[maxi + 1 :] = 0 output[i] = prediction return output def test_step(self, batch, batch_idx): # NOTE: validation_step is not defined on this module; implement it before calling trainer.test loss = self.validation_step(batch, batch_idx) return loss def forward(self, x): output = self.model(x) return output def configure_optimizers(self): return torch.optim.Adam(self.model.classifier.parameters(), lr=0.02) # classifier is the Linear head replaced above (it has no .fc attribute) trainedEfficientNet = efficientnetModule.load_from_checkpoint( "/kaggle/input/baseline-trained-models/Models/Timm_model_5.ckpt" ) # load_from_checkpoint is a classmethod, so no instance is needed first trainedEfficientNet.eval() # trainedEfficientNet.to(device) print("Network loaded") # # trainer = pl.Trainer(accelerator="cuda", devices=find_usable_cuda_devices(2)) # chk_path = "/kaggle/input/modello/best_model(1).ckpt" # model2 = efficientnetModule.load_from_checkpoint(chk_path) # # results = trainer.test(model=model2, datamodule=efficientnetModule, verbose=True) # # results # # Naive solution (Requires cuda to be enabled) (https://www.kaggle.com/code/mudittiwari255/pytorch-lightning-baseline) # for file_name in tqdm(test_data.df['image_id'][:10]): # slide = openslide.OpenSlide(os.path.join(test_data.imgdir, file_name+'.tiff')) # tiles = get_tile_locations_from_slide(slide, tile_size, N_tiles, level) # Get tile coordinates of top N tiles # tiled_image = create_tiled_image(slide, tiles, tile_size, N_tiles, level) # Convert the tiles information into a tiled image # tiled_image = torch.tensor(tiled_image.transpose(2,1,0)) # tiled_image = tiled_image[None,:]#.to(device) # make batch of 1 sample # output = trainedEfficientNet(tiled_image.float()) # prediction = torch.argmax(output) # print(prediction) # # Pytorch lightning solution # n_dev = 1 # trainer = pl.Trainer(accelerator='gpu', devices=n_dev, enable_progress_bar=True) # with open('/kaggle/working/submission.csv', 'w') as submis: # submis.write('image_id,isup_grade') # trainer.test(model = trainedEfficientNet, dataloaders=test_dataloader, ckpt_path ='/kaggle/input/baseline-trained-models/Models/Timm_model_5.ckpt', verbose=True) # with open('/kaggle/working/submission.csv', 'w') 
as submis: # submis.write('image_id,isup_grade') # with torch.no_grad(): # for test_img in test_data: # result = model2.model(test_img) # submis.write(f'{test_img},{int(torch.sum(model2.get_prediction(result)))}')
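To make the tile-ranking idea above easy to verify without OpenSlide or slide files, here is a self-contained sketch under the same assumption (dark tissue on a white background, so lower pixel sums mean more tissue); all names here are illustrative:

import numpy as np

def top_tissue_tiles(img, tile, n):
    # Enumerate top-left corners of non-overlapping tiles.
    h, w = img.shape[:2]
    coords = [(y, x) for y in range(0, h - tile + 1, tile)
                     for x in range(0, w - tile + 1, tile)]
    # Ascending pixel sum: darker (tissue-rich) tiles rank first,
    # matching the notebook's sort.
    coords.sort(key=lambda c: int(img[c[0]:c[0] + tile, c[1]:c[1] + tile].sum()))
    return coords[:n]

demo = np.full((512, 512, 3), 255, dtype=np.uint8)  # all-white "slide"
demo[100:200, 300:400] = 40                         # one dark "tissue" patch
print(top_tissue_tiles(demo, 256, 2))               # the (0, 256) tile ranks first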
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/788/129788516.ipynb
ismi-group3-panda-36-256-256-res1-tiles
florisvanwettum
[{"Id": 129788516, "ScriptId": 38540203, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2476129, "CreationDate": "05/16/2023 13:22:26", "VersionNumber": 7.0, "Title": "PANDA_Submission_notebook", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 215.0, "LinesInsertedFromPrevious": 50.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 165.0, "LinesInsertedFromFork": 88.0, "LinesDeletedFromFork": 201.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 127.0, "TotalVotes": 0}]
[{"Id": 186155043, "KernelVersionId": 129788516, "SourceDatasetVersionId": 5682352}]
[{"Id": 5682352, "DatasetId": 3228105, "DatasourceVersionId": 5757916, "CreatorUserId": 2476129, "LicenseName": "Unknown", "CreationDate": "05/14/2023 11:36:05", "VersionNumber": 5.0, "Title": "ISMI_Group3_PANDA_36_256_256_res1_tiles", "Slug": "ismi-group3-panda-36-256-256-res1-tiles", "Subtitle": "Medium resolution 36 256x256 tiles per sample, individual and combined images.", "Description": "This dataset is a preprocessed provides a preprocessed version of the [PANDA](https://www.kaggle.com/competitions/prostate-cancer-grade-assessment) challenge. Each sample has 36 tiles, of 256 x 256 pixels. The tiles are taken from the medium resolution.", "VersionNotes": "Added the last sample of the train.csv to the tiled_images", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3228105, "CreatorUserId": 2476129, "OwnerUserId": 2476129.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5682352.0, "CurrentDatasourceVersionId": 5757916.0, "ForumId": 3293216, "Type": 2, "CreationDate": "05/05/2023 21:28:46", "LastActivityDate": "05/05/2023", "TotalViews": 99, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 4}]
[{"Id": 2476129, "UserName": "florisvanwettum", "DisplayName": "Florijs", "RegisterDate": "11/10/2018", "PerformanceTier": 0}]
false
1
3,148
0
3,283
3,148
129788001
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import matplotlib.pyplot as plt import numpy as np import seaborn as sns df1 = pd.read_csv("/kaggle/input/house-pricecsv/train.csv") df2 = pd.read_csv("/kaggle/input/house-pricecsv/test.csv") df = pd.concat([df1, df2], axis=0, ignore_index=True) df.head() df.tail() df.shape df.describe() df.isnull().sum() df.info() df.dtypes df.isnull() df_training = df[df["SalePrice"].notna()] df_training.head() df_training.shape df_training.nunique() numeric_columns = df_training.describe().columns numeric_columns categorical_columns = df_training.describe(include="O").columns categorical_columns df_training.describe() df_training[numeric_columns].isnull().sum() # count missing values per numeric column df_training[categorical_columns].isnull().sum() # count missing values per categorical column type(categorical_columns) categorical_columns = df_training.describe(include="O").columns for i in categorical_columns: print(i) print(df_training[i].unique()) print(df_training["MiscFeature"].isnull().sum()) print(df_training["Fence"].isnull().sum()) print(df_training["PoolQC"].isnull().sum()) print(df_training["GarageCond"].isnull().sum()) print(df_training["GarageQual"].isnull().sum()) print(df_training["GarageFinish"].isnull().sum()) print(df_training["GarageType"].isnull().sum()) print(df_training["GarageFinish"].isnull().sum()) print(df_training["GarageFinish"].isnull().sum()) df_training.shape # Exploring multicollinearity. from statsmodels.stats.outliers_influence import variance_inflation_factor def calc_vif(X): # Calculating VIF vif = pd.DataFrame() vif["variables"] = X.columns vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])] return vif calc_vif( df_training[ [i for i in df_training.describe().columns if i not in ["SalePrice"]] ].dropna() # VIF cannot handle NaNs, so drop incomplete rows first ) df_infer = df[df["SalePrice"].isna()] df_infer df_infer.shape
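As a quick sanity check on what calc_vif reports, a self-contained sketch on synthetic data (all names illustrative): a column that is nearly a linear combination of the others gets a very large variance inflation factor.

import numpy as np
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor

rng = np.random.default_rng(0)
X = pd.DataFrame({"a": rng.normal(size=200), "b": rng.normal(size=200)})
X["c"] = 2 * X["a"] + 3 * X["b"] + rng.normal(scale=0.01, size=200)  # near-collinear

vif = {col: variance_inflation_factor(X.values, i) for i, col in enumerate(X.columns)}
print(vif)  # "a", "b", and especially "c" show VIFs far above the usual ~5-10 cutoff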
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/788/129788001.ipynb
null
null
[{"Id": 129788001, "ScriptId": 38585324, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13782320, "CreationDate": "05/16/2023 13:18:28", "VersionNumber": 1.0, "Title": "House_Price", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 115.0, "LinesInsertedFromPrevious": 115.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
756
0
756
756
129788869
<jupyter_start><jupyter_text>Best Books (10k) Multi-Genre Data # Context This data was collected in an attempt to personally identify more books that one would like based on ones they may have read in the past. It comprises some (around 10000) of the most recommended books of all time. ### *Please Upvote if this helps you!* # Content 1. **Book** - Name of the book. Sometimes this includes the details of the Series it belongs to inside a parenthesis. This information can be further extracted to analyse only series. 2. **Author** - Name of the book's Author 3. **Description** - The book's description as mentioned on Goodreads 4. **Genres** - Multiple Genres as classified on Goodreads. Could be useful for Multi-label classification or Content based recommendation and Clustering. 5. **Average Rating** - The average rating (Out of 5) given on Goodreads 6. **Number of Ratings** - The number of users that have rated the book (not to be confused with reviews) 7. **URL** - The Goodreads URL for the book's details page # Inspiration - Cluster books/authors based on Description and Genre - Content based recommendation system using Genre, Description and Ratings - Genre prediction from Description data (Multi-label classification) - Can be used in conjunction with my [IMDb dataset with descriptions](https://www.kaggle.com/datasets/ishikajohari/imdb-data-with-descriptions) for certain use cases # Acknowledgements The data was collected from Goodreads from the list - *Books That Everyone Should Read At Least Once* Kaggle dataset identifier: best-books-10k-multi-genre-data <jupyter_script>import ast import pandas as pd import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity from sklearn.model_selection import train_test_split from sklearn.preprocessing import MultiLabelBinarizer from sklearn.multiclass import OneVsRestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import f1_score from sklearn.cluster import KMeans from wordcloud import WordCloud import matplotlib.pyplot as plt df = pd.read_csv("/kaggle/input/best-books-10k-multi-genre-data/goodreads_data.csv") df.head() # # Data Preparation df["Description"] = df["Description"].fillna("") # # Content Based Recommendation # Convert the book descriptions into TF-IDF vectors vectorizer = TfidfVectorizer(stop_words="english") tfidf_matrix = vectorizer.fit_transform(df["Description"]) # Compute the cosine similarity matrix cosine_sim = cosine_similarity(tfidf_matrix, tfidf_matrix) # Function to get the most similar books def get_recommendations(title, cosine_sim=cosine_sim): # Get the index of the book that matches the title idx = df[df["Book"] == title].index[0] # Get the pairwise similarity scores of all books with that book sim_scores = list(enumerate(cosine_sim[idx])) # Sort the books based on the similarity scores sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) # Get the scores of the 10 most similar books sim_scores = sim_scores[1:11] # Get the book indices book_indices = [i[0] for i in sim_scores] # Return the top 10 most similar books return df["Book"].iloc[book_indices] # # Genre Prediction # Convert Genres from string to list (literal_eval is a safer alternative to eval for parsing list literals) df["Genres"] = df["Genres"].apply(ast.literal_eval) # Binarize the genres mlb = MultiLabelBinarizer() y = mlb.fit_transform(df["Genres"]) # Split the data X_train, X_test, y_train, y_test = train_test_split( df["Description"], y, test_size=0.2, random_state=0 ) # Convert the descriptions into TF-IDF vectors vectorizer = 
TfidfVectorizer(stop_words="english") X_train = vectorizer.fit_transform(X_train) X_test = vectorizer.transform(X_test) # Train a multi-label classifier clf = OneVsRestClassifier(LogisticRegression(solver="lbfgs")) clf.fit(X_train, y_train) # Predict the test set results y_pred = clf.predict(X_test) # Compute the F1 score print(f1_score(y_test, y_pred, average="micro")) # # Author Clustering # Convert the book descriptions into TF-IDF vectors vectorizer = TfidfVectorizer(stop_words="english") tfidf_matrix = vectorizer.fit_transform(df["Description"]) # Compute K-Means clustering kmeans = KMeans(n_clusters=5, n_init=10) kmeans.fit(tfidf_matrix) # Add cluster number to the original dataframe df["Cluster"] = kmeans.labels_ # Print the number of authors in each cluster print(df.groupby("Cluster")["Author"].nunique()) # # Sentiment from textblob import TextBlob # Calculate sentiment polarity of descriptions df["Sentiment"] = df["Description"].apply( lambda text: TextBlob(text).sentiment.polarity ) # Check average sentiment by rating print(df.groupby("Avg_Rating")["Sentiment"].mean()) # # Visualisation # Generate a word cloud for book descriptions text = " ".join(description for description in df["Description"]) wordcloud = WordCloud(background_color="white").generate(text) plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() # Plot the distribution of average ratings df["Avg_Rating"].hist(bins=20) plt.xlabel("Average Rating") plt.ylabel("Count") plt.title("Distribution of Average Ratings") plt.show() # Plot the top 10 most common genres df["Genres"].explode().value_counts()[:10].plot(kind="bar") plt.xlabel("Genre") plt.ylabel("Count") plt.title("Top 10 Most Common Genres") plt.show() # Author Distribution df["Author"].value_counts().head(10).plot(kind="bar") plt.xlabel("Author") plt.ylabel("Number of Books") plt.title("Top 10 Authors with the Most Books") plt.show() # Sentiment Distribution df["Sentiment"].hist(bins=20) plt.xlabel("Sentiment Score") plt.ylabel("Number of Books") plt.title("Distribution of Sentiment Scores") plt.show() # Cluster Size Distribution df["Cluster"].value_counts().plot(kind="bar") plt.xlabel("Cluster") plt.ylabel("Number of Authors") plt.title("Number of Authors in Each Cluster") plt.show() # Avg Rating by Cluster df.groupby("Cluster")["Avg_Rating"].mean().plot(kind="bar") plt.xlabel("Cluster") plt.ylabel("Average Rating") plt.title("Average Rating by Cluster") plt.show() # Heatmap of Genres from sklearn.preprocessing import MultiLabelBinarizer import seaborn as sns # Get a list of all genres all_genres = list(set([g for sublist in df["Genres"].tolist() for g in sublist])) # Binarize the genres mlb = MultiLabelBinarizer(classes=all_genres) binary_genres = mlb.fit_transform(df["Genres"]) # Create a DataFrame from our binary matrix, and calculate the correlations binary_genres_df = pd.DataFrame(binary_genres, columns=mlb.classes_) correlations = binary_genres_df.corr() # Plot the correlations in a heatmap plt.figure(figsize=(10, 10)) sns.heatmap(correlations, cmap="coolwarm", center=0) plt.title("Genre Correlations") plt.show()
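A hypothetical usage example for the get_recommendations function defined above — the title must match the "Book" column exactly, so guarding the lookup avoids an IndexError on a missing title:

title = "1984"  # assumed to be present in the dataset
if (df["Book"] == title).any():
    print(get_recommendations(title))
else:
    print(f"{title!r} not found in df['Book']")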
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/788/129788869.ipynb
best-books-10k-multi-genre-data
ishikajohari
[{"Id": 129788869, "ScriptId": 38584099, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4335187, "CreationDate": "05/16/2023 13:25:13", "VersionNumber": 1.0, "Title": "Books EDA", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 180.0, "LinesInsertedFromPrevious": 180.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
[{"Id": 186155490, "KernelVersionId": 129788869, "SourceDatasetVersionId": 5618933}]
[{"Id": 5618933, "DatasetId": 3230801, "DatasourceVersionId": 5694114, "CreatorUserId": 5431518, "LicenseName": "CC0: Public Domain", "CreationDate": "05/06/2023 14:13:35", "VersionNumber": 2.0, "Title": "Best Books (10k) Multi-Genre Data", "Slug": "best-books-10k-multi-genre-data", "Subtitle": "Data from the \"Books That Everyone Should Read At Least Once\" list on Goodreads", "Description": "# Context\nThis data was collected in an attempt personally identify more books that one would like based on ones they may have read in the past. It comprises of some (around 10000) of the most recommended books of all time.\n\n### *Please Upvote if this helps you!*\n\n# Content\n1. **Book** - Name of the book. Soemtimes this includes the details of the Series it belongs to inside a parenthesis. This information can be further extracted to analyse only series.\n2. **Author** - Name of the book's Author\n3. **Description** - The book's description as mentioned on Goodreads\n4. **Genres** - Multiple Genres as classified on Goodreads. Could be useful for Multi-label classification or Content based recommendation and Clustering.\n5. **Average Rating** - The average rating (Out of 5) given on Goodreads\n6. **Number of Ratings** - The Number of users that have Ratings. (Not to be confused with reviews)\n7. **URL** - The Goodreads URL for the book's details' page\n\n# Inspiration\n- Cluster books/authors based on Description and Genre\n- Content based recomendation system using Genre, Description and Ratings\n- Genre prediction from Description data (Multi-label classification)\n- Can be used in conjunction with my [IMDb dataset with descriptions](https://www.kaggle.com/datasets/ishikajohari/imdb-data-with-descriptions) for certain use cases\n\n# Acknowledgements\nThe data was collected from Goodreads from the list - *Books That Everyone Should Read At Least Once*", "VersionNotes": "Data Update 2023-05-06", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3230801, "CreatorUserId": 5431518, "OwnerUserId": 5431518.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5618933.0, "CurrentDatasourceVersionId": 5694114.0, "ForumId": 3295942, "Type": 2, "CreationDate": "05/06/2023 13:43:14", "LastActivityDate": "05/06/2023", "TotalViews": 8727, "TotalDownloads": 1388, "TotalVotes": 56, "TotalKernels": 8}]
[{"Id": 5431518, "UserName": "ishikajohari", "DisplayName": "Ishika Johari", "RegisterDate": "07/07/2020", "PerformanceTier": 2}]
false
1
1,541
3
1,943
1,541
129788906
# # Load the data import pandas as pd data_df = pd.read_csv("/kaggle/input/kdl-datascience-compe/train.csv") data_df.columns # # On renovation ("改装") # Renovation seems likely to raise the price, so let's visualize it. data_df[["改装", "取引価格(総額)_log"]].groupby("改装").mean()
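A small follow-up sketch using the column names from this notebook (assuming matplotlib is available): the same groupby rendered as a bar chart, which makes the renovation effect easier to see than the raw table.

import matplotlib.pyplot as plt

(data_df.groupby("改装")["取引価格(総額)_log"]
        .mean()
        .plot(kind="bar", title="Mean log transaction price by renovation status"))
plt.show()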
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/788/129788906.ipynb
null
null
[{"Id": 129788906, "ScriptId": 38599591, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7501332, "CreationDate": "05/16/2023 13:25:31", "VersionNumber": 1.0, "Title": "EDA:reform", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 15.0, "LinesInsertedFromPrevious": 15.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
113
0
113
113
129711281
<jupyter_start><jupyter_text>one_year_retail_transactions The dataset contains information related to retail transactions in the Gulf region. Here is a brief description of each column: UCID: Unique Customer ID. GENDER: Gender of the customer. AGE_GROUP2: Age group of the customer. MEMBERSHIP_DATE: Date when the customer became a member. BRAND_REPORTING: Brand associated with the transaction. LOCATION_NAME_REPORTING: Name of the location where the transaction occurred. STORE_COUNTRY: Country where the store is located. BUSINESS_CHANNEL: Channel through which the transaction was made (e.g., offline, online). INVOICE_NO: Invoice number associated with the transaction. NET_SALES_AMOUNT: Net sales amount for the transaction. SALES_QTY: Quantity of items sold in the transaction. DISCOUNT_AMOUNT: Amount of discount applied to the transaction. DAY_DT: Date of the transaction. DIV_NAME: Division name (category) of the purchased item. DEPT_NAME: Department name of the purchased item. CLASS_NAME: Class name of the purchased item. SUBCLASS_NAME: Subclass name of the purchased item. Kaggle dataset identifier: one-year-retail-transactions <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # # First exercise # A step-by-step process to conduct a detailed exploratory data analysis for behavioral segmentation # ### Step 1: Import the necessary libraries and load the dataset. import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # Load the dataset df = pd.read_excel( "/kaggle/input/one-year-retail-transactions/Retail_Transactions.xlsx" ) # Display the first few rows of the dataset df.head() # ### Step 2: Explore the dataset to gain initial insights. # Check the summary statistics of numerical columns df.describe() # Check the data types and missing values df.info() # ### Step 3: Perform data cleaning and preprocessing (if necessary). # Handle missing values (if any) df.dropna(inplace=True) # Remove duplicates (if any) df.drop_duplicates(inplace=True) # ### Step 4: Explore the distribution of variables and identify key trends. # Remove leading spaces from column names df.columns = df.columns.str.strip() # Verify the updated column names print(df.columns) # Explore the distribution of categorical variables plt.figure(figsize=(10, 6)) sns.countplot(x="GENDER", data=df) plt.title("Gender Distribution") plt.show() # Explore the distribution of numerical variables plt.figure(figsize=(10, 6)) sns.histplot(x="AGE_GROUP2", data=df, bins=10) plt.title("Age Group Distribution") plt.show() # Explore the sales trends over time plt.figure(figsize=(10, 6)) sns.lineplot(x="DAY_DT", y="NET_SALES_AMOUNT", data=df) plt.title("Sales Trends Over Time") plt.show() # ### Step 5: Analyze customer behavior and identify patterns. 
# Calculate and visualize average sales by brand brand_sales = ( df.groupby("BRAND_REPORTING")["NET_SALES_AMOUNT"] .mean() .sort_values(ascending=False) ) plt.figure(figsize=(10, 6)) sns.barplot(x=brand_sales.index, y=brand_sales.values) plt.title("Average Sales by Brand") plt.xticks(rotation=90) plt.show() # Analyze purchase patterns by age group age_group_sales = df.groupby("AGE_GROUP2")["NET_SALES_AMOUNT"].sum() plt.figure(figsize=(10, 6)) sns.barplot(x=age_group_sales.index, y=age_group_sales.values) plt.title("Total Sales by Age Group") plt.show() # # Exercise Two: Create a customer value-based segmentation # In this exercise, # * we will calculate customer metrics such as total sales, purchase frequency, and average order value. # * Then, will explore the distribution of these metrics to understand their patterns. # * We will define segmentation criteria based on quantiles of these metrics and create customer segments accordingly. # * Finally, we will visualize the customer segments. # ### Step 1: Calculate customer metrics for segmentation. # Calculate customer metrics customer_metrics = ( df.groupby("UCID") .agg( total_sales=("NET_SALES_AMOUNT", "sum"), purchase_frequency=("INVOICE_NO", "nunique"), average_order_value=("NET_SALES_AMOUNT", "mean"), ) .reset_index() ) # ### Step 2: Explore the distribution of customer metrics. # Explore the distribution of total sales plt.figure(figsize=(10, 6)) sns.histplot(x="total_sales", data=customer_metrics, bins=10) plt.title("Total Sales Distribution") plt.show() # Explore the distribution of purchase frequency plt.figure(figsize=(10, 6)) sns.countplot(x="purchase_frequency", data=customer_metrics) plt.title("Purchase Frequency Distribution") plt.show() # Explore the distribution of average order value plt.figure(figsize=(10, 6)) sns.histplot(x="average_order_value", data=customer_metrics, bins=10) plt.title("Average Order Value Distribution") plt.show() # ### Step 3: Segment customers based on their metrics. # # Define segmentation criteria high_value_threshold = customer_metrics["total_sales"].quantile(0.75) frequent_shopper_threshold = customer_metrics["purchase_frequency"].quantile(0.75) # Create segments based on metrics customer_segments = [] for index, row in customer_metrics.iterrows(): segment = "" if row["total_sales"] > high_value_threshold: segment += "High-Value " else: segment += "Low-Value " if row["purchase_frequency"] > frequent_shopper_threshold: segment += "Frequent Shopper" else: segment += "Infrequent Shopper" customer_segments.append(segment) # Add segments to the customer metrics dataframe customer_metrics["segment"] = customer_segments # ### Step 4: Visualize the customer segments. # # Visualize customer segments plt.figure(figsize=(10, 6)) sns.countplot(x="segment", data=customer_metrics) plt.title("Customer Segmentation") plt.xticks(rotation=45) plt.show() # # Third Exercise: creating a behavioral segmentation to identify customer preferences. # ### Step 1: Identify relevant behavioral attributes for segmentation. 
# Group data by relevant attributes and calculate aggregated metrics preferences = ( df.groupby(["UCID", "BRAND_REPORTING", "DIV_NAME"]) .agg( total_sales=("NET_SALES_AMOUNT", "sum"), purchase_count=("INVOICE_NO", "nunique"), ) .reset_index() ) # Pivot the data to create a preference matrix preference_matrix = preferences.pivot_table( index="UCID", columns=["BRAND_REPORTING", "DIV_NAME"], values="total_sales", fill_value=0, ) # Perform clustering (e.g., K-means) on the preference matrix from sklearn.cluster import KMeans n_clusters = 4 # Define the number of clusters kmeans = KMeans(n_clusters=n_clusters, random_state=42) clusters = kmeans.fit_predict(preference_matrix) # Add the cluster labels to the preference matrix preference_matrix["Cluster"] = clusters # ### Step 2: Analyze customer preferences and create segments. # Group data by relevant attributes and calculate aggregated metrics preferences = ( df.groupby(["UCID", "BRAND_REPORTING", "DIV_NAME"]) .agg( total_sales=("NET_SALES_AMOUNT", "sum"), purchase_count=("INVOICE_NO", "nunique"), ) .reset_index() ) # Pivot the data to create a preference matrix preference_matrix = preferences.pivot_table( index="UCID", columns=["BRAND_REPORTING", "DIV_NAME"], values="total_sales", fill_value=0, ) # Perform clustering (e.g., K-means) on the preference matrix from sklearn.cluster import KMeans n_clusters = 4 # Define the number of clusters n_init = 10 # Set the value of n_init explicitly kmeans = KMeans(n_clusters=n_clusters, n_init=n_init, random_state=42) clusters = kmeans.fit_predict(preference_matrix.values) # Add the cluster labels to the preference matrix preference_matrix["Cluster"] = clusters # ### Step 3: Visualize the behavioral segments. # # Plot the clusters plt.figure(figsize=(10, 6)) sns.scatterplot( x=preference_matrix.index, y=preference_matrix.sum(axis=1), # Update the column name here hue="Cluster", palette="viridis", data=preference_matrix, ) plt.title("Behavioral Segmentation") plt.xlabel("Customer ID") plt.ylabel("Total Sales") plt.legend(title="Cluster") plt.show() # # Exercise Four: Cohort analysis to show acquisition and retention trends # Convert 'MEMBERSHIP_DATE' column to datetime df["MEMBERSHIP_DATE"] = pd.to_datetime(df["MEMBERSHIP_DATE"]) # Create 'MembershipYearMonth' column df["MembershipYearMonth"] = df["MEMBERSHIP_DATE"].dt.to_period("M") # Group the data by MembershipYearMonth and calculate the initial and total number of customers cohort_data = df.groupby("MembershipYearMonth").agg( InitialCustomers=("UCID", "nunique"), TotalCustomers=("UCID", "count") ) # Calculate the retention rate cohort_data["RetentionRate"] = ( cohort_data["TotalCustomers"] / cohort_data["InitialCustomers"] ) # Convert 'RetentionRate' column to numeric type cohort_data["RetentionRate"] = pd.to_numeric( cohort_data["RetentionRate"], errors="coerce" ) # Fill missing values with 1 cohort_data["RetentionRate"] = cohort_data["RetentionRate"].fillna(1) import matplotlib.pyplot as plt # Line plot for InitialCustomers plt.figure(figsize=(10, 6)) cohort_data["InitialCustomers"].plot(marker="o") plt.title("Initial Customers Over Time") plt.xlabel("Membership Year-Month") plt.ylabel("Number of Customers") plt.grid(True) plt.show() # Line plot for TotalCustomers plt.figure(figsize=(10, 6)) cohort_data["TotalCustomers"].plot(marker="o") plt.title("Total Customers Over Time") plt.xlabel("Membership Year-Month") plt.ylabel("Number of Customers") plt.grid(True) plt.show() # Line plot for RetentionRate plt.figure(figsize=(10, 6)) 
cohort_data["RetentionRate"].plot(marker="o") plt.title("Retention Rate Over Time") plt.xlabel("Membership Year-Month") plt.ylabel("Retention Rate") plt.ylim(0, 1) plt.grid(True) plt.show() # # Step 1: Preprocess the data # df['MEMBERSHIP_DATE'] = pd.to_datetime(df['MEMBERSHIP_DATE']) # df['YearMonth'] = df['MEMBERSHIP_DATE'].dt.to_period('M') # # Step 2: Calculate the number of unique customers by cohort and month # cohort_data = df.groupby(['YearMonth', 'UCID']).size().reset_index(name='NumCustomers') # # Step 3: Create a pivot table to calculate monthly active customers by cohort and month # cohort_matrix = cohort_data.pivot_table(index='YearMonth', columns='UCID', values='NumCustomers', aggfunc='count') # # Step 4: Calculate the retention rates # cohort_size = cohort_matrix.iloc[:, 0] # retention_matrix = cohort_matrix.divide(cohort_size, axis=0) # # Step 5: Visualize the retention rates using a heatmap # plt.figure(figsize=(12, 8)) # plt.title('Cohort Analysis - Retention Rates') # sns.heatmap(retention_matrix, annot=True, fmt='.0%', cmap='YlGnBu', vmin=0, vmax=1, cbar=False) # plt.show() # # Step 6: Calculate and plot the cohort sizes # cohort_sizes = cohort_matrix.sum(axis=0) # cohort_sizes.plot(kind='bar', figsize=(10, 6)) # plt.title('Cohort Sizes') # plt.xlabel('Cohort') # plt.ylabel('Number of Customers') # plt.show() # Step 7: Calculate and plot the retention rates over time retention_over_time = retention_matrix.mean() retention_over_time.plot(figsize=(10, 6)) plt.title("Retention Rates Over Time") plt.xlabel("Months since First Purchase") plt.ylabel("Retention Rate") plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/711/129711281.ipynb
one-year-retail-transactions
mustafaabdelnasser16
[{"Id": 129711281, "ScriptId": 38572052, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12057103, "CreationDate": "05/16/2023 00:32:33", "VersionNumber": 1.0, "Title": "customer behavioral segmentation", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 334.0, "LinesInsertedFromPrevious": 334.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186046699, "KernelVersionId": 129711281, "SourceDatasetVersionId": 5693599}]
[{"Id": 5693599, "DatasetId": 3273701, "DatasourceVersionId": 5769220, "CreatorUserId": 12057103, "LicenseName": "Unknown", "CreationDate": "05/15/2023 20:48:06", "VersionNumber": 1.0, "Title": "one_year_retail_transactions", "Slug": "one-year-retail-transactions", "Subtitle": NaN, "Description": "The dataset contains information related to retail transactions in the Gulf region. Here is a brief description of each column:\n\nUCID: Unique Customer ID.\nGENDER: Gender of the customer.\nAGE_GROUP2: Age group of the customer.\nMEMBERSHIP_DATE: Date when the customer became a member.\nBRAND_REPORTING: Brand associated with the transaction.\nLOCATION_NAME_REPORTING: Name of the location where the transaction occurred.\nSTORE_COUNTRY: Country where the store is located.\nBUSINESS_CHANNEL: Channel through which the transaction was made (e.g., offline, online).\nINVOICE_NO: Invoice number associated with the transaction.\nNET_SALES_AMOUNT: Net sales amount for the transaction.\nSALES_QTY: Quantity of items sold in the transaction.\nDISCOUNT_AMOUNT: Amount of discount applied to the transaction.\nDAY_DT: Date of the transaction.\nDIV_NAME: Division name (category) of the purchased item.\nDEPT_NAME: Department name of the purchased item.\nCLASS_NAME: Class name of the purchased item.\nSUBCLASS_NAME: Subclass name of the purchased item.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3273701, "CreatorUserId": 12057103, "OwnerUserId": 12057103.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5706612.0, "CurrentDatasourceVersionId": 5782676.0, "ForumId": 3339356, "Type": 2, "CreationDate": "05/15/2023 20:48:06", "LastActivityDate": "05/15/2023", "TotalViews": 284, "TotalDownloads": 60, "TotalVotes": 2, "TotalKernels": 1}]
[{"Id": 12057103, "UserName": "mustafaabdelnasser16", "DisplayName": "Mustafa Abd El-Nasser", "RegisterDate": "10/22/2022", "PerformanceTier": 1}]
false
0
3,080
0
3,368
3,080
129685475
<jupyter_start><jupyter_text>Wild blueberry Yield Prediction Dataset
### Context
Blueberries are perennial flowering plants with blue or purple berries. They are classified in the section Cyanococcus within the genus Vaccinium. Vaccinium also includes cranberries, bilberries, huckleberries, and Madeira blueberries. Commercial blueberries—both wild (lowbush) and cultivated (highbush)—are all native to North America. The highbush varieties were introduced into Europe during the 1930s.
Blueberries are usually prostrate shrubs that can vary in size from 10 centimeters (4 inches) to 4 meters (13 feet) in height. In the commercial production of blueberries, the species with small, pea-size berries growing on low-level bushes are known as "lowbush blueberries" (synonymous with "wild"), while the species with larger berries growing on taller, cultivated bushes are known as "highbush blueberries". Canada is the leading producer of lowbush blueberries, while the United States produces some 40% of the world's supply of highbush blueberries.
### Content
"The dataset used for predictive modeling was generated by the Wild Blueberry Pollination Simulation Model, which is an open-source, spatially-explicit computer simulation program that enables exploration of how various factors, including plant spatial arrangement, outcrossing and self-pollination, bee species compositions and weather conditions, in isolation and combination, affect pollination efficiency and yield of the wild blueberry agroecosystem. The simulation model has been validated by the field observation and experimental data collected in Maine USA and Canadian Maritimes during the last 30 years and now is a useful tool for hypothesis testing and theory development for wild blueberry pollination researches."
Features | Unit | Description
Clonesize | m2 | The average blueberry clone size in the field
Honeybee | bees/m2/min | Honeybee density in the field
Bumbles | bees/m2/min | Bumblebee density in the field
Andrena | bees/m2/min | Andrena bee density in the field
Osmia | bees/m2/min | Osmia bee density in the field
MaxOfUpperTRange | ℃ | The highest record of the upper band daily air temperature during the bloom season
MinOfUpperTRange | ℃ | The lowest record of the upper band daily air temperature
AverageOfUpperTRange | ℃ | The average of the upper band daily air temperature
MaxOfLowerTRange | ℃ | The highest record of the lower band daily air temperature
MinOfLowerTRange | ℃ | The lowest record of the lower band daily air temperature
AverageOfLowerTRange | ℃ | The average of the lower band daily air temperature
RainingDays | Day | The total number of days during the bloom season, each of which has precipitation larger than zero
AverageRainingDays | Day | The average of raining days of the entire bloom season
Kaggle dataset identifier: wild-blueberry-yield-prediction-dataset
<jupyter_script># # # Load Python Packages
# basics
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")

# preprocessing
from sklearn.preprocessing import (
    StandardScaler,
    RobustScaler,
    MinMaxScaler,
    PowerTransformer,
)

# statistics
from scipy import stats
from scipy.stats import skew
from scipy.special import boxcox1p

# feature engineering
from sklearn.feature_selection import mutual_info_regression

# transformers and pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.preprocessing import FunctionTransformer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn import set_config

# algorithms
from lightgbm import LGBMRegressor

# model evaluation
from sklearn.model_selection import GridSearchCV, cross_val_score, cross_validate
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import mean_absolute_error, mean_squared_error

# # # First look at the data
# Read the data
train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv", index_col="id")
test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv", index_col="id")
original = pd.read_csv(
    "/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv",
    index_col=[0],
)
# reserved for pipeline
pipe_data = train.copy()
pipe_test = test.copy()
pipe_original = original.copy()
# use for preliminary analysis
train_df = train.copy()
test_df = test.copy()
original_df = original.copy()
train_df.head()
original_df.index.names = ["id"]
original_df.head()
train_df = pd.concat([train_df, original_df])
train_df.head()
train_df.info()
# is there any missing value?
train_df.isnull().any().any()
# ## Descriptive statistics
# numerical feature descriptive statistics
train_df.describe().T
# ## Grouping features for preprocessing purposes
train_df.nunique().sort_values()
# Just bookkeeping
feature_list = [feature for feature in train_df.columns if not feature == "yield"]
continuous_features = ["fruitmass", "fruitset", "seeds"]
discrete_features = list(set(feature_list) - set(continuous_features))
# sorted() returns the sorted lists, so this assertion actually checks the split
# (list.sort() returns None, which would make the check vacuous)
assert sorted(feature_list) == sorted(continuous_features + discrete_features)
# # # Exploratory Data Analysis
# Let's observe how the target variable changes with the features.
fig, ax = plt.subplots(6, 3, figsize=(40, 20))
for var, subplot in zip(feature_list, ax.flatten()):
    sns.scatterplot(x=var, y="yield", data=train_df, ax=subplot, hue="yield")
# Observations:
# * There are strong correlations between the continuous features and the target.
# Let's look at the correlations between the features and the target in a more quantitative way.
# Display correlations between features and yield on a heatmap.
sns.set(font_scale=1.1)
correlation_train = train_df.corr()
mask = np.triu(correlation_train.corr())
plt.figure(figsize=(15, 15))
sns.heatmap(
    correlation_train,
    annot=True,
    fmt=".1f",
    cmap="coolwarm",
    square=True,
    mask=mask,
    linewidths=1,
    cbar=False,
)
# Mutual information is another measure, one that can also capture more diverse relationships and works well with categorical and discrete variables.
y = train_df["yield"]
# determine the mutual information for numerical features
# You need to fillna to get results from the mutual_info_regression function
mutual_df = train_df[feature_list]
mutual_info = mutual_info_regression(mutual_df, y, random_state=1)
mutual_info = pd.Series(mutual_info)
mutual_info.index = mutual_df.columns
pd.DataFrame(
    mutual_info.sort_values(ascending=False), columns=["MI_score"]
).style.background_gradient("cool")
# # # Feature Engineering
# Let's define some new features.
train_df["total_bee_density"] = (
    train_df["honeybee"] + train_df["bumbles"] + train_df["andrena"] + train_df["osmia"]
)
train_df["bee_to_clone"] = train_df["total_bee_density"] / train_df["clonesize"]
train_df["Max_temp_difference"] = (
    train_df["MaxOfUpperTRange"] - train_df["MinOfLowerTRange"]
)
train_df["Avarage_temp_difference"] = (
    train_df["AverageOfUpperTRange"] - train_df["AverageOfLowerTRange"]
)
train_df["mass_set"] = train_df["fruitmass"] * train_df["fruitset"]
train_df["mass_seed"] = train_df["fruitmass"] * train_df["seeds"]
train_df["set_seed"] = train_df["fruitset"] * train_df["seeds"]
train_df["mass_ser_seed"] = (
    train_df["fruitmass"] * train_df["fruitset"] * train_df["seeds"]
)
new_features = [
    "total_bee_density",
    "bee_to_clone",
    "Max_temp_difference",
    "Avarage_temp_difference",
    "mass_set",
    "mass_seed",
    "set_seed",
    "mass_ser_seed",
]
# Let's check the new features' mutual information scores...
mutual_df = train_df[new_features]
mutual_info = mutual_info_regression(mutual_df, y, random_state=1)
mutual_info = pd.Series(mutual_info)
mutual_info.index = mutual_df.columns
pd.DataFrame(
    mutual_info.sort_values(ascending=False), columns=["New_Feature_MI"]
).style.background_gradient("cool")
fig, ax = plt.subplots(3, 3, figsize=(20, 20))
for var, subplot in zip(new_features, ax.flatten()):
    sns.scatterplot(x=var, y="yield", data=train_df, ax=subplot, hue="yield")
updated_feature_list = train_df.columns.to_list()
updated_continuous_features = [
    "total_bee_density",
    "bee_to_clone",
    "fruitmass",
    "fruitset",
    "seeds",
    "mass_set",
    "mass_seed",
    "set_seed",
    "mass_ser_seed",
]
updated_discrete_features = list(
    set(updated_feature_list) - set(updated_continuous_features)
)
assert sorted(updated_feature_list) == sorted(
    updated_continuous_features + updated_discrete_features
)
# ## A custom transformer for Feature Engineering
class FeatureCreator(BaseEstimator, TransformerMixin):
    def __init__(self, add_attributes=True):
        self.add_attributes = add_attributes

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        if self.add_attributes:
            X_copy = X.copy()
            X_copy["total_bee_density"] = (
                X_copy["honeybee"]
                + X_copy["bumbles"]
                + X_copy["andrena"]
                + X_copy["osmia"]
            )
            X_copy["bee_to_clone"] = X_copy["total_bee_density"] / X_copy["clonesize"]
            X_copy["Max_temp_difference"] = (
                X_copy["MaxOfUpperTRange"] - X_copy["MinOfLowerTRange"]
            )
            X_copy["Avarage_temp_difference"] = (
                X_copy["AverageOfUpperTRange"] - X_copy["AverageOfLowerTRange"]
            )
            X_copy["mass_set"] = X_copy["fruitmass"] * X_copy["fruitset"]
            X_copy["mass_seed"] = X_copy["fruitmass"] * X_copy["seeds"]
            X_copy["set_seed"] = X_copy["fruitset"] * X_copy["seeds"]
            X_copy["mass_ser_seed"] = (
                X_copy["fruitmass"] * X_copy["fruitset"] * X_copy["seeds"]
            )
            X_copy = X_copy.drop(
                [
                    "MaxOfLowerTRange",
                    "MaxOfUpperTRange",
                    "MinOfLowerTRange",
                    "MinOfUpperTRange",
                ],
                axis=1,
            )
            return X_copy
        else:
            # Pass the data through unchanged when feature creation is disabled
            # (returning the undefined name X_copy here would raise a NameError)
            return X


Creator = FeatureCreator(add_attributes=True)
# # # Putting the pieces together
# Okay... We are almost ready to start modeling. Before moving on, we take a first look at the data we reserved for the pipeline. Let's separate target and features.
pipe_original.index.names = ["id"]
pipe_original.head()
pipe_data = pipe_data.sample(frac=1, random_state=0)
pipe_data = pd.concat([pipe_data, pipe_original])
pipe_data.info()
y = pipe_data["yield"]
pipe_data = pipe_data.drop("yield", axis=1)
# # # Scikit-learn pipeline with AutoML
# flaml
from flaml import AutoML

automl = AutoML()
automl_pipeline = Pipeline([("Creator", Creator), ("automl", automl)])
automl_pipeline
# Specify the automl goal and constraints
automl_settings = {
    "time_budget": 7500,
    "metric": "mae",
    "task": "regression",
    "seed": 7654321,
    "ensemble": True,
}
pipeline_settings = {f"automl__{key}": value for key, value in automl_settings.items()}
automl_pipeline = automl_pipeline.fit(pipe_data, y, **pipeline_settings)
preds_test = automl_pipeline.predict(pipe_test)
# # # Submission
output = pd.DataFrame({"id": pipe_test.index, "yield": preds_test})
output.to_csv("submission.csv", index=False)
output.head()
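# # Appendix: a quick cross-validated baseline
# Before (or alongside) a long AutoML budget, it can be useful to know what a plain
# gradient-boosting model scores through the same preprocessing. This cell is an
# editorial sketch, not part of the original notebook: it reuses `Creator`,
# `pipe_data` and `y` from above, and the LGBM hyperparameters and 5-fold CV are
# default editorial choices, not tuned values.
baseline = make_pipeline(Creator, LGBMRegressor(random_state=0))
cv_mae = -cross_val_score(
    baseline, pipe_data, y, scoring="neg_mean_absolute_error", cv=5
)
print(f"Baseline LGBM CV MAE: {cv_mae.mean():.2f} +/- {cv_mae.std():.2f}")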
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/685/129685475.ipynb
wild-blueberry-yield-prediction-dataset
shashwatwork
[{"Id": 129685475, "ScriptId": 38325284, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9785820, "CreationDate": "05/15/2023 18:26:25", "VersionNumber": 2.0, "Title": "Sklearn Pipeline with flaml", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 274.0, "LinesInsertedFromPrevious": 5.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 269.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186008507, "KernelVersionId": 129685475, "SourceDatasetVersionId": 2462316}]
[{"Id": 2462316, "DatasetId": 1490445, "DatasourceVersionId": 2504743, "CreatorUserId": 1444085, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "07/25/2021 17:48:21", "VersionNumber": 2.0, "Title": "Wild blueberry Yield Prediction Dataset", "Slug": "wild-blueberry-yield-prediction-dataset", "Subtitle": "Predict the yield of Wild Blueberry", "Description": "### Context\n\nBlueberries are perennial flowering plants with blue or purple berries. They are classified in the section Cyanococcus within the genus Vaccinium. Vaccinium also includes cranberries, bilberries, huckleberries, and Madeira blueberries. Commercial blueberries\u2014both wild (lowbush) and cultivated (highbush)\u2014are all native to North America. The highbush varieties were introduced into Europe during the 1930s.\n\nBlueberries are usually prostrate shrubs that can vary in size from 10 centimeters (4 inches) to 4 meters (13 feet) in height. In the commercial production of blueberries, the species with small, pea-size berries growing on low-level bushes are known as \"lowbush blueberries\" (synonymous with \"wild\"), while the species with larger berries growing on taller, cultivated bushes are known as \"highbush blueberries\". Canada is the leading producer of lowbush blueberries, while the United States produces some 40% of the world s supply of highbush blueberries.\n\n### Content\n\n\"The dataset used for predictive modeling was generated by the Wild Blueberry Pollination Simulation Model, which is an open-source, spatially-explicit computer simulation program that enables exploration of how various factors, including plant spatial arrangement, outcrossing and self-pollination, bee species compositions and weather conditions, in isolation and combination, affect pollination efficiency and yield of the wild blueberry agroecosystem. 
The simulation model has been validated by the field observation and experimental data collected in Maine USA and Canadian Maritimes during the last 30 years and now is a useful tool for hypothesis testing and theory development for wild blueberry pollination researches.\"\n\nFeatures \tUnit\tDescription\nClonesize\tm2\tThe average blueberry clone size in the field\nHoneybee\tbees/m2/min\tHoneybee density in the field\nBumbles\tbees/m2/min\tBumblebee density in the field\nAndrena\tbees/m2/min\tAndrena bee density in the field\nOsmia\tbees/m2/min\tOsmia bee density in the field\nMaxOfUpperTRange\t\u2103\tThe highest record of the upper band daily air temperature during the bloom season\nMinOfUpperTRange\t\u2103\tThe lowest record of the upper band daily air temperature\nAverageOfUpperTRange\t\u2103\tThe average of the upper band daily air temperature\nMaxOfLowerTRange\t\u2103\tThe highest record of the lower band daily air temperature\nMinOfLowerTRange\t\u2103\tThe lowest record of the lower band daily air temperature\nAverageOfLowerTRange\t\u2103\tThe average of the lower band daily air temperature\nRainingDays\tDay\tThe total number of days during the bloom season, each of which has precipitation larger than zero\nAverageRainingDays\tDay\tThe average of raining days of the entire bloom season\n\n### Acknowledgements\n\nQu, Hongchun; Obsie, Efrem; Drummond, Frank (2020), \u201cData for: Wild blueberry yield prediction using a combination of computer simulation and machine learning algorithms\u201d, Mendeley Data, V1, doi: 10.17632/p5hvjzsvn8.1\n\nDataset is outsourced from [here.](https://data.mendeley.com/datasets/p5hvjzsvn8/1)", "VersionNotes": "updated", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1490445, "CreatorUserId": 1444085, "OwnerUserId": 1444085.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2462316.0, "CurrentDatasourceVersionId": 2504743.0, "ForumId": 1510148, "Type": 2, "CreationDate": "07/25/2021 17:47:00", "LastActivityDate": "07/25/2021", "TotalViews": 11876, "TotalDownloads": 1130, "TotalVotes": 48, "TotalKernels": 82}]
[{"Id": 1444085, "UserName": "shashwatwork", "DisplayName": "Shashwat Tiwari", "RegisterDate": "11/24/2017", "PerformanceTier": 2}]
false
3
2,579
0
3,315
2,579
129685366
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
train.head()
train.isnull().sum()
test.isnull().sum()
test.head()
test1 = test.iloc[:, 1:17]
test1.head()
X = train.iloc[:, 1:17]
Y = train["yield"]
X.head()
Y.head()
from sklearn.model_selection import train_test_split

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
X_train.shape, X_test.shape
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(X_train, Y_train)
y_pred = lr.predict(X_test)
y_pred
Y_test.shape, y_pred.shape
X_test.shape, test1.shape
y_pred1 = lr.predict(test1)
y_pred1
sample = pd.read_csv("/kaggle/input/playground-series-s3e14/sample_submission.csv")
sample.head()
sample["yield"] = y_pred1
sample.head()
sample.to_csv("submission.csv", index=False)
from sklearn.metrics import mean_squared_error

# Mean squared error on the hold-out split (an error measure, not an accuracy)
mse = mean_squared_error(Y_test, y_pred)
print(mse)
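# A single train/test split can be noisy; cross-validation gives a steadier
# estimate of the same linear model. This cell is an editorial sketch, not part
# of the original notebook: it reuses `X` and `Y` from above, and the 5-fold
# choice is a default, not a value from the original.
from sklearn.model_selection import cross_val_score

cv_mse = -cross_val_score(
    LinearRegression(), X, Y, scoring="neg_mean_squared_error", cv=5
)
print("CV MSE per fold:", cv_mse)
print("Mean CV RMSE:", cv_mse.mean() ** 0.5)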
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/685/129685366.ipynb
null
null
[{"Id": 129685366, "ScriptId": 38563657, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7351911, "CreationDate": "05/15/2023 18:25:16", "VersionNumber": 2.0, "Title": "Simple Linear Regression PS3E14_Prediction of Wild", "EvaluationDate": "05/15/2023", "IsChange": false, "TotalLines": 71.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 71.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
564
0
564
564
129685248
<jupyter_start><jupyter_text>Bank Customer Churn
RowNumber—corresponds to the record (row) number and has no effect on the output.
CustomerId—contains random values and has no effect on customer leaving the bank.
Surname—the surname of a customer has no impact on their decision to leave the bank.
CreditScore—can have an effect on customer churn, since a customer with a higher credit score is less likely to leave the bank.
Geography—a customer’s location can affect their decision to leave the bank.
Gender—it’s interesting to explore whether gender plays a role in a customer leaving the bank.
Age—this is certainly relevant, since older customers are less likely to leave their bank than younger ones.
Tenure—refers to the number of years that the customer has been a client of the bank. Normally, older clients are more loyal and less likely to leave a bank.
Balance—also a very good indicator of customer churn, as people with a higher balance in their accounts are less likely to leave the bank compared to those with lower balances.
NumOfProducts—refers to the number of products that a customer has purchased through the bank.
HasCrCard—denotes whether or not a customer has a credit card. This column is also relevant, since people with a credit card are less likely to leave the bank.
IsActiveMember—active customers are less likely to leave the bank.
EstimatedSalary—as with balance, people with lower salaries are more likely to leave the bank compared to those with higher salaries.
Exited—whether or not the customer left the bank.
Complain—whether the customer has a complaint or not.
Satisfaction Score—score provided by the customer for their complaint resolution.
Card Type—type of card held by the customer.
Points Earned—the points earned by the customer for using the credit card.
Acknowledgements
As we know, it is much more expensive to sign in a new client than keeping an existing one. It is advantageous for banks to know what leads a client towards the decision to leave the company. Churn prevention allows companies to develop loyalty programs and retention campaigns to keep as many customers as possible.
Kaggle dataset identifier: bank-customer-churn
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv("/kaggle/input/bank-customer-churn/Customer-Churn-Records.csv")
df.head()
df = df.drop(["RowNumber", "CustomerId", "Surname"], axis=1)
df.shape
df.isnull().sum()
df["Gender"].unique()
df["Card Type"].unique()
# ***Categorical features are:***
# 1. Gender
# 2. HasCrCard
# 3. IsActiveMember
# 4. Exited
# 5. Complain
# 6. Card Type
df1 = df.groupby("Complain")["Exited"].apply(lambda x: (x == 1).mean()).reset_index()
df1 = df1.sort_values("Exited", ascending=False)
sns.barplot(data=df1, x="Complain", y="Exited", order=df1.Complain, color="#FF8C01")
plt.xlabel("Has complained or not")
plt.ylabel("Proportion of Customer Churn")
plt.title("Likelihood of Customer Churn by Complaint History")
plt.show()
df1 = df.groupby("HasCrCard")["Exited"].apply(lambda x: (x == 1).mean()).reset_index()
df1 = df1.sort_values("Exited", ascending=False)
sns.barplot(data=df1, x="HasCrCard", y="Exited", order=df1.HasCrCard, color="#FF8C01")
plt.xlabel("Has Credit Card or not")
plt.ylabel("Proportion of Customer Churn")
plt.title("Likelihood of Customer Churn by Credit Card Holder or Not")
plt.show()
df1 = df.groupby("Gender")["Exited"].apply(lambda x: (x == 1).mean()).reset_index()
df1 = df1.sort_values("Exited", ascending=False)
sns.barplot(data=df1, x="Gender", y="Exited", order=df1.Gender, color="#FF8C01")
plt.xlabel("Gender of Customer")
plt.ylabel("Proportion of Customer Churn")
plt.title("Likelihood of Customer Churn by Gender of Customer")
plt.show()
df1 = (
    df.groupby("IsActiveMember")["Exited"]
    .apply(lambda x: (x == 1).mean())
    .reset_index()
)
df1 = df1.sort_values("Exited", ascending=False)
sns.barplot(
    data=df1, x="IsActiveMember", y="Exited", order=df1.IsActiveMember, color="#FF8C01"
)
plt.xlabel("Is Active Member or Not")
plt.ylabel("Proportion of Customer Churn")
plt.title("Likelihood of Customer Churn by Activity of the Customer")
plt.show()
# The plot shows that if a bank customer is not active, there is a tendency for the customer to leave the bank.
# Card Type
df1 = df.groupby("Card Type")["Exited"].apply(lambda x: (x == 1).mean()).reset_index()
df1 = df1.sort_values("Card Type", ascending=False)
sns.barplot(
    data=df1, x="Card Type", y="Exited", order=df1["Card Type"], color="#FF8C01"
)
plt.xlabel("Type of Card Holder")
plt.ylabel("Proportion of Customer Churn")
plt.title("Likelihood of Customer Churn by Credit Card Type")
plt.show()
# It is surprising, but the graph shows that gold card holders have the largest chance of leaving the bank
# compared to the other card type holders.
# And the most surprising part is that diamond users are more likely to be churned,
# as they account for a proportion of 20% of the whole dataset.
df.head()
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    classification_report,
    precision_score,
)
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier

lbl = LabelEncoder()
# Encode the categorical features:
# 1. Gender
# 2. HasCrCard
# 3. IsActiveMember
# 4. Exited
# 5. Complain
# 6. Card Type
df["Gender"] = lbl.fit_transform(df["Gender"])
df["Card Type"] = lbl.fit_transform(df["Card Type"])
df["Geography"] = lbl.fit_transform(df["Geography"])
x = df.drop(["Exited"], axis=1)  # independent features
y = df["Exited"]  # dependent feature
X_train, X_test, y_train, y_test = train_test_split(
    x, y, test_size=0.15, random_state=42
)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
k = np.sqrt(df.shape[0])
k
accuracy = []
prec = []
model_number = 1
for i in range(100, 105):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train, y_train)
    y_predi = knn.predict(X_test)
    score = accuracy_score(y_test, y_predi)
    precision = precision_score(y_test, y_predi)
    accuracy.append(score * 100)
    prec.append(precision * 100)
for i in range(len(accuracy)):
    print("Accuracy of the " + str(model_number) + "th model is: " + str(accuracy[i]))
    print("Precision of the " + str(model_number) + "th model is: " + str(prec[i]))
    model_number += 1
from sklearn.ensemble import GradientBoostingClassifier

model = GradientBoostingClassifier(n_estimators=12, loss="exponential", subsample=0.999)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
score = accuracy_score(y_test, y_pred)
precScore = precision_score(y_test, y_pred)
print("Accuracy of the Gradient Boosting model is: " + str(score * 100) + "%")
print("Precision of the Gradient Boosting model is: " + str(precScore * 100) + "%")
from sklearn.ensemble import AdaBoostClassifier

model = AdaBoostClassifier(n_estimators=11, random_state=1, algorithm="SAMME")
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
score = accuracy_score(y_test, y_pred)
precScore = precision_score(y_test, y_pred)
print("Accuracy of the AdaBoost model is: " + str(score * 100) + "%")
print("Precision of the AdaBoost model is: " + str(precScore * 100) + "%")
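# # A quick look beyond accuracy
# Churn datasets are usually imbalanced, so accuracy and precision alone can be
# misleading. This cell is an editorial addition, not part of the original
# notebook: it reuses the fitted AdaBoost `model` predictions (`y_pred`) and the
# `y_test` split from above, and prints a confusion matrix plus per-class
# precision/recall/F1 using the metrics already imported earlier.
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))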
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/685/129685248.ipynb
bank-customer-churn
radheshyamkollipara
[{"Id": 129685248, "ScriptId": 38564486, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8103914, "CreationDate": "05/15/2023 18:23:56", "VersionNumber": 2.0, "Title": "notebookb3e9a432fe", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 179.0, "LinesInsertedFromPrevious": 82.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 97.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186008074, "KernelVersionId": 129685248, "SourceDatasetVersionId": 5550559}]
[{"Id": 5550559, "DatasetId": 3197960, "DatasourceVersionId": 5625285, "CreatorUserId": 14862076, "LicenseName": "Other (specified in description)", "CreationDate": "04/28/2023 16:32:01", "VersionNumber": 1.0, "Title": "Bank Customer Churn", "Slug": "bank-customer-churn", "Subtitle": "Bank Customer Data for Customer Churn", "Description": "RowNumber\u2014corresponds to the record (row) number and has no effect on the output.\nCustomerId\u2014contains random values and has no effect on customer leaving the bank.\nSurname\u2014the surname of a customer has no impact on their decision to leave the bank.\nCreditScore\u2014can have an effect on customer churn, since a customer with a higher credit score is less likely to leave the bank.\nGeography\u2014a customer\u2019s location can affect their decision to leave the bank.\nGender\u2014it\u2019s interesting to explore whether gender plays a role in a customer leaving the bank.\nAge\u2014this is certainly relevant, since older customers are less likely to leave their bank than younger ones.\nTenure\u2014refers to the number of years that the customer has been a client of the bank. Normally, older clients are more loyal and less likely to leave a bank.\nBalance\u2014also a very good indicator of customer churn, as people with a higher balance in their accounts are less likely to leave the bank compared to those with lower balances.\nNumOfProducts\u2014refers to the number of products that a customer has purchased through the bank.\nHasCrCard\u2014denotes whether or not a customer has a credit card. This column is also relevant, since people with a credit card are less likely to leave the bank.\nIsActiveMember\u2014active customers are less likely to leave the bank.\nEstimatedSalary\u2014as with balance, people with lower salaries are more likely to leave the bank compared to those with higher salaries.\nExited\u2014whether or not the customer left the bank.\nComplain\u2014customer has complaint or not.\nSatisfaction Score\u2014Score provided by the customer for their complaint resolution.\nCard Type\u2014type of card hold by the customer.\nPoints Earned\u2014the points earned by the customer for using credit card.\n\nAcknowledgements\n\nAs we know, it is much more expensive to sign in a new client than keeping an existing one.\n\nIt is advantageous for banks to know what leads a client towards the decision to leave the company.\n\nChurn prevention allows companies to develop loyalty programs and retention campaigns to keep as many customers as possible.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3197960, "CreatorUserId": 14862076, "OwnerUserId": 14862076.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5550559.0, "CurrentDatasourceVersionId": 5625285.0, "ForumId": 3262570, "Type": 2, "CreationDate": "04/28/2023 16:32:01", "LastActivityDate": "04/28/2023", "TotalViews": 39315, "TotalDownloads": 6814, "TotalVotes": 97, "TotalKernels": 52}]
[{"Id": 14862076, "UserName": "radheshyamkollipara", "DisplayName": "Radheshyam Kollipara", "RegisterDate": "04/28/2023", "PerformanceTier": 0}]
false
1
1,932
0
2,433
1,932
129766849
<jupyter_start><jupyter_text>Diamond
Kaggle dataset identifier: diamond
<jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from xgboost import XGBRegressor
from sklearn import metrics

dataset = pd.read_csv("/kaggle/input/diamond/diamonds.csv")
dataset.info()
dataset.head()
# drop() is not in-place, so the result must be assigned back
dataset = dataset.drop("Unnamed: 0", axis=1)
dataset.isnull().sum()
dataset.isna().sum()
dataset.shape
# Dropping dimensionless diamonds
dataset = dataset.drop(dataset[dataset["x"] == 0].index)
dataset = dataset.drop(dataset[dataset["y"] == 0].index)
dataset = dataset.drop(dataset[dataset["z"] == 0].index)
dataset.shape
ax = sns.pairplot(dataset, hue="cut")
# Dropping the outliers.
dataset = dataset[(dataset["depth"] < 75) & (dataset["depth"] > 45)]
dataset = dataset[(dataset["table"] < 80) & (dataset["table"] > 40)]
dataset = dataset[(dataset["x"] < 30)]
dataset = dataset[(dataset["y"] < 30)]
dataset = dataset[(dataset["z"] < 30) & (dataset["z"] > 2)]
dataset.shape
ax = sns.pairplot(dataset, hue="cut")
dataset.head()
dataset.info()
# Get list of categorical variables
s = dataset.dtypes == "object"
object_cols = list(s[s].index)
print("Categorical variables:")
print(object_cols)
# Make copy to avoid changing original data
label_data = dataset.copy()
# Apply label encoder to each column with categorical data
label_encoder = LabelEncoder()
for col in object_cols:
    label_data[col] = label_encoder.fit_transform(label_data[col])
label_data.head()
# correlation matrix
cmap = sns.diverging_palette(70, 20, s=50, l=40, n=6, as_cmap=True)
corrmat = label_data.corr()
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(
    corrmat,
    cmap=cmap,
    annot=True,
)
# Assigning the features as X and target as y
X = label_data.drop(["price"], axis=1)
y = label_data["price"]
print(X)
print(y)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=1
)
# Building pipelines of standard scaler and model for various regressors.
pipeline_lr = Pipeline(
    [("scaler", StandardScaler()), ("lr_regressor", LinearRegression())]
)
pipeline_dt = Pipeline(
    [("scaler", StandardScaler()), ("dt_regressor", DecisionTreeRegressor())]
)
pipeline_rf = Pipeline(
    [("scaler", StandardScaler()), ("rf_regressor", RandomForestRegressor())]
)
pipeline_kn = Pipeline(
    [("scaler", StandardScaler()), ("kn_regressor", KNeighborsRegressor())]
)
pipeline_xgb = Pipeline(
    [("scaler", StandardScaler()), ("xgb_regressor", XGBRegressor())]
)

# List of all the pipelines
pipelines = [pipeline_lr, pipeline_dt, pipeline_rf, pipeline_kn, pipeline_xgb]

# Dictionary of pipelines and model types for ease of reference
pipe_dict = {
    0: "LinearRegression",
    1: "DecisionTree",
    2: "RandomForest",
    3: "KNeighbors",
    4: "XGBRegressor",
}

# Fit the pipelines
for pipe in pipelines:
    pipe.fit(X_train, y_train)

cv_results_rms = []
for i, model in enumerate(pipelines):
    cv_score = cross_val_score(
        model, X_train, y_train, scoring="neg_root_mean_squared_error", cv=10
    )
    cv_results_rms.append(cv_score)
    # scores are negated RMSE, so values closer to zero are better
    print("%s: %f " % (pipe_dict[i], cv_score.mean()))

# Model prediction on test data
pred = pipeline_xgb.predict(X_test)

# Model Evaluation
print("R^2:", metrics.r2_score(y_test, pred))
print(
    "Adjusted R^2:",
    1
    - (1 - metrics.r2_score(y_test, pred))
    * (len(y_test) - 1)
    / (len(y_test) - X_test.shape[1] - 1),
)
print("MAE:", metrics.mean_absolute_error(y_test, pred))
print("MSE:", metrics.mean_squared_error(y_test, pred))
print("RMSE:", np.sqrt(metrics.mean_squared_error(y_test, pred)))
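# A short tuning sketch for the XGBoost pipeline chosen above; the grid
# values are illustrative assumptions, not tuned recommendations. The
# parameter prefix matches the "xgb_regressor" step name defined earlier.
from sklearn.model_selection import GridSearchCV

param_grid = {
    "xgb_regressor__n_estimators": [100, 300],
    "xgb_regressor__max_depth": [3, 6],
    "xgb_regressor__learning_rate": [0.05, 0.1],
}
search = GridSearchCV(
    pipeline_xgb,
    param_grid,
    scoring="neg_root_mean_squared_error",
    cv=5,
    n_jobs=-1,
)
search.fit(X_train, y_train)
print("Best params:", search.best_params_)
print("Best CV RMSE:", -search.best_score_)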
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/766/129766849.ipynb
diamond
sonyaugustine123
[{"Id": 129766849, "ScriptId": 38591683, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14505047, "CreationDate": "05/16/2023 10:21:47", "VersionNumber": 1.0, "Title": "Diamond Price Prediction", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 131.0, "LinesInsertedFromPrevious": 131.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186126513, "KernelVersionId": 129766849, "SourceDatasetVersionId": 5697405}]
[{"Id": 5697405, "DatasetId": 3275940, "DatasourceVersionId": 5773048, "CreatorUserId": 14505047, "LicenseName": "Unknown", "CreationDate": "05/16/2023 09:52:31", "VersionNumber": 1.0, "Title": "Diamond", "Slug": "diamond", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3275940, "CreatorUserId": 14505047, "OwnerUserId": 14505047.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5697405.0, "CurrentDatasourceVersionId": 5773048.0, "ForumId": 3341617, "Type": 2, "CreationDate": "05/16/2023 09:52:31", "LastActivityDate": "05/16/2023", "TotalViews": 56, "TotalDownloads": 7, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 14505047, "UserName": "sonyaugustine123", "DisplayName": "Sony Augustine@123", "RegisterDate": "04/05/2023", "PerformanceTier": 0}]
false
1
1,281
0
1,299
1,281
129321175
"C:/Users/berkg/OneDrive/Masaüstü/dataset/car_evaluation.csv" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings("ignore") data = "C:/Users/berkg/OneDrive/Masaüstü/dataset/car_evaluation.csv" df = pd.read_csv(data, header=None) df.shape df.head() col_names = ["buying", "maint", "doors", "persons", "lug_boot", "safety", "class"] df.columns = col_names col_names df.head() df.info() col_names = ["buying", "maint", "doors", "persons", "lug_boot", "safety", "class"] for col in col_names: print(df[col].value_counts()) df["class"].value_counts() df.isnull().sum() x = df.drop(["class"], axis=1) y = df["class"] from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.33, random_state=42 ) x_train.shape, x_test.shape x_train.dtypes x_train.head() import category_encoders as ce encoder = ce.OrdinalEncoder( cols=["buying", "maint", "doors", "persons", "lug_boot", "safety"] ) x_train = encoder.fit_transform(x_train) x_test = encoder.transform(x_test)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/321/129321175.ipynb
null
null
[{"Id": 129321175, "ScriptId": 38449867, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13980931, "CreationDate": "05/12/2023 18:49:52", "VersionNumber": 1.0, "Title": "MLHOMEWORK", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 53.0, "LinesInsertedFromPrevious": 53.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
"C:/Users/berkg/OneDrive/Masaüstü/dataset/car_evaluation.csv" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings("ignore") data = "C:/Users/berkg/OneDrive/Masaüstü/dataset/car_evaluation.csv" df = pd.read_csv(data, header=None) df.shape df.head() col_names = ["buying", "maint", "doors", "persons", "lug_boot", "safety", "class"] df.columns = col_names col_names df.head() df.info() col_names = ["buying", "maint", "doors", "persons", "lug_boot", "safety", "class"] for col in col_names: print(df[col].value_counts()) df["class"].value_counts() df.isnull().sum() x = df.drop(["class"], axis=1) y = df["class"] from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.33, random_state=42 ) x_train.shape, x_test.shape x_train.dtypes x_train.head() import category_encoders as ce encoder = ce.OrdinalEncoder( cols=["buying", "maint", "doors", "persons", "lug_boot", "safety"] ) x_train = encoder.fit_transform(x_train) x_test = encoder.transform(x_test)
false
0
405
0
405
405
129321822
<jupyter_start><jupyter_text>Suicide Attempts in Shandong, China
```
Data on serious suicide attempts in Shandong, China
A data frame with 2571 observations on the following 11 variables.
```
| Column | Description |
| --- | --- |
| Person_ID | ID number of victims |
| Hospitalised | Hospitalized? (no or yes) |
| Died | Died? (no or yes) |
| Urban | Urban area? (no, unknown, or yes) |
| Year | Year (2009, 2010, or 2011) |
| Month | Month (1=Jan through 12=December) |
| Sex | Sex (female or male) |
| Age | Age (years) |
| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |
| Occupation | One of ten occupation categories |
| method | One of nine possible methods |
### Details
Data from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.
## Source
Sun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) "Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study," BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762
Kaggle dataset identifier: suicide-attempts-in-shandong-china
<jupyter_script># # 📚 Imports
# ---
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import warnings

warnings.filterwarnings("ignore")

# # 📖 Data
# ---
df = pd.read_csv("/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv")
df.head()
df.info()
# Checking null values
df.isna().sum()
# Drop unnecessary columns
df.drop(["Unnamed: 0", "Person_ID"], axis=1, inplace=True)
# Column value counts
print(df["Hospitalised"].value_counts())
print("-" * 30)
print(df["Died"].value_counts())
print("-" * 30)
print(df["Urban"].value_counts())
print("-" * 30)
print(df["Education"].value_counts())
print("-" * 30)
print(df["Occupation"].value_counts())
print("-" * 30)
print(df["method"].value_counts())


# # 📊 Visualization
# ## All columns compared with Age
def plots(df, x):
    plt.style.use("dark_background")
    f, ax = plt.subplots(1, 2, figsize=(25, 10))
    Group_data = df.groupby(x)
    sns.barplot(
        x=Group_data["Age"].mean().index,
        y=Group_data["Age"].mean().values,
        ax=ax[0],
        palette="viridis",
    )
    for container in ax[0].containers:
        ax[0].bar_label(container, color="white", size=20)
    palette_color = sns.color_palette("viridis")
    plt.pie(
        x=df[x].value_counts(),
        labels=df[x].value_counts().index,
        autopct="%.0f%%",
        shadow=True,
        colors=palette_color,
    )
    plt.suptitle(x, fontsize=25)
    plt.show()


for i in df.columns:
    if i != "Age":
        plots(df, i)

# ## Values Distribution
plt.style.use("dark_background")
plt.figure(figsize=(12, 20))
list_columns = list(df.columns)
for i in range(len(list_columns)):
    plt.subplot(5, 2, i + 1)
    plt.title(list_columns[i])
    plt.hist(df[list_columns[i]])
    plt.grid(alpha=0.5)
plt.tight_layout()

# ## Values Distribution Compared with Died
plt.style.use("dark_background")
# grid size matches the plt.subplot(5, 2, i) calls below (9 non-Died features)
fig, axs = plt.subplots(5, 2, figsize=(10, 20))
i = 1
for feature in df.columns:
    if feature != "Died":
        plt.subplot(5, 2, i)
        sns.histplot(
            data=df, x=feature, kde=True, palette="winter", hue="Died", alpha=0.8
        )
        plt.grid(alpha=0.5)
        i += 1

# ## Died by Month
plt.figure(figsize=(10, 7))
plt.style.use("dark_background")
sns.violinplot(x="Died", y="Month", data=df, palette="viridis")

# ## Age Compared to Month of Occurrence
plt.style.use("dark_background")
plt.figure(figsize=(12, 10))
plt.hexbin(df["Age"], df["Month"], gridsize=12, cmap="viridis", mincnt=1)
plt.colorbar(label="Count")
plt.xlabel("Age") plt.ylabel("Month") plt.title("Age Compared to Month of Occurrence") plt.show() # ## Age Compared to Year of Occurrence plt.style.use("dark_background") plt.figure(figsize=(10, 5)) plt.hexbin(df["Age"], df["Year"], gridsize=3, cmap="viridis", mincnt=1) plt.colorbar(label="Count") plt.xlabel("Age") plt.ylabel("Year") plt.title("Age Compared to Year of Occurrence") plt.show() # ## Converting object values to numeric for correlation list_str = df.select_dtypes(include="object").columns le = LabelEncoder() for c in list_str: df[c] = le.fit_transform(df[c]) # ## Correlation plt.figure(figsize=(15, 12)) plt.style.use("dark_background") sns.heatmap(df.corr(), annot=True, cmap="viridis")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/321/129321822.ipynb
suicide-attempts-in-shandong-china
utkarshx27
[{"Id": 129321822, "ScriptId": 38449901, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12038039, "CreationDate": "05/12/2023 18:57:51", "VersionNumber": 1.0, "Title": "Suicide Attempts in Shandong EDA \ud83d\udcca", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 139.0, "LinesInsertedFromPrevious": 139.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 19}]
[{"Id": 185260506, "KernelVersionId": 129321822, "SourceDatasetVersionId": 5617993}]
[{"Id": 5617993, "DatasetId": 3230370, "DatasourceVersionId": 5693173, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/06/2023 11:54:22", "VersionNumber": 1.0, "Title": "Suicide Attempts in Shandong, China", "Slug": "suicide-attempts-in-shandong-china", "Subtitle": "Serious Suicide Attempts in Shandong, China: Three-Year Study", "Description": "```\nData on serious suicide attempts in Shandong, China\nA data frame with 2571 observations on the following 11 variables.\n```\n\n| Column | Description |\n| --- | --- |\n| Person_ID | ID number of victims |\n| Hospitalised | Hospitalized? (no or yes) |\n| Died | Died? (no or yes) |\n| Urban | Urban area? (no, unknown, or yes) |\n| Year | Year (2009, 2010, or 2011) |\n| Month | Month (1=Jan through 12=December) |\n| Sex | Sex (female or male) |\n| Age | Age (years) |\n| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |\n| Occupation | One of ten occupation categories |\n| method | One of nine possible methods |\n\n### Details \nData from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.\n\n## Source\nSun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) \"Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study,\" BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3230370, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5617993.0, "CurrentDatasourceVersionId": 5693173.0, "ForumId": 3295509, "Type": 2, "CreationDate": "05/06/2023 11:54:22", "LastActivityDate": "05/06/2023", "TotalViews": 8885, "TotalDownloads": 1402, "TotalVotes": 42, "TotalKernels": 12}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
false
1
1,160
19
1,572
1,160
129321538
<jupyter_start><jupyter_text>Flickr 8k Dataset
### Context
A new benchmark collection for sentence-based image description and search, consisting of 8,000 images that are each paired with five different captions which provide clear descriptions of the salient entities and events. … The images were chosen from six different Flickr groups, and tend not to contain any well-known people or locations, but were manually selected to depict a variety of scenes and situations
### Content
What's inside is more than just rows and columns. Make it easy for others to get started by describing how you acquired the data and what time period it represents, too.
Kaggle dataset identifier: flickr8k
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

# for dirname, _, filenames in os.walk('/kaggle/input'):
#     for filename in filenames:
#         print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import cv2
import torch
from torch_snippets import *
from torchvision import transforms
from sklearn.model_selection import train_test_split

device = "cuda" if torch.cuda.is_available() else "cpu"

df = pd.read_csv("/kaggle/input/flickr8k/captions.txt", delimiter=",")
df
txt = df[df.columns[-1]].tolist()
images = df[df.columns[0]].tolist()
train_image, test_image, train_text, test_text = train_test_split(
    images, txt, test_size=0.2
)

# Note: tfms is defined but unused below; the GIT processor handles
# resizing and normalization itself.
tfms = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),  # imagenet
    ]
)
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("microsoft/git-base")


class SegData(torch.utils.data.Dataset):
    """Flickr8k image-caption pairs; for each pair the GIT processor
    returns pixel_values, input_ids, and attention_mask."""

    def __init__(self, images, txt):
        self.image_path = "/kaggle/input/flickr8k/Images/"
        self.images = images
        self.txt = txt
        self.processor = processor

    def __len__(self):
        return len(self.images)

    def __getitem__(self, ix):
        image = cv2.imread(self.image_path + self.images[ix])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # image = cv2.resize(image, (224, 224))
        label = self.txt[ix]
        encoding = self.processor(
            images=image, text=label, padding="max_length", return_tensors="pt"
        )
        encoding = {k: v.squeeze() for k, v in encoding.items()}
        return encoding

    def choose(self):
        return self[randint(len(self.images))]


train_dataset = SegData(train_image, train_text)
test_dataset = SegData(test_image, test_text)

import matplotlib.pyplot as plt

encoding = train_dataset[-105]
plt.imshow(encoding["pixel_values"].permute(1, 2, 0).detach().numpy())
processor.decode(encoding["input_ids"].tolist())

trn_dl = torch.utils.data.DataLoader(
    train_dataset, batch_size=2, drop_last=True, shuffle=True
)
test_dl = torch.utils.data.DataLoader(
    test_dataset, batch_size=2, drop_last=True, shuffle=True
)

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("microsoft/git-base").to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)


def train_batch(batch):
    model.train()
    output = model(
        input_ids=batch["input_ids"].to(device),
        pixel_values=batch["pixel_values"].to(device),
        attention_mask=batch["attention_mask"].to(device),
        labels=batch["input_ids"].to(device),
    )
    loss = output.loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()


@torch.no_grad()
def valid_batch(batch):
    model.eval()
    output = model(
        input_ids=batch["input_ids"].to(device),
        pixel_values=batch["pixel_values"].to(device),
        attention_mask=batch["attention_mask"].to(device),
        labels=batch["input_ids"].to(device),
    )
    loss = output.loss
    return loss.item()


n_epoch = 1
log = Report(n_epoch)
for epochs in range(n_epoch):
    N = len(trn_dl)
    for i, data in enumerate(trn_dl):
        loss = train_batch(data)
        log.record(epochs + (i + 1) / N, trn_loss=loss, end="\r")
    # Validate on the test loader (the original iterated trn_dl here by mistake)
    N = len(test_dl)
    for i, data in enumerate(test_dl):
        val_loss = valid_batch(data)
        log.record(epochs + (i + 1) / N, val_loss=val_loss, end="\r")

test_dl = torch.utils.data.DataLoader(
    test_dataset, batch_size=2, drop_last=True, shuffle=True
)
encoded = next(iter(test_dl))
pixel_values = encoded["pixel_values"][0].to(device)
plt.imshow(pixel_values.permute(1, 2, 0).detach().cpu().numpy())
with torch.no_grad():
    generated_ids = model.generate(
        pixel_values=pixel_values.unsqueeze(0), max_length=50
    )
generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(generated_caption)

for _ in range(5):
    test_dl = torch.utils.data.DataLoader(
        test_dataset, batch_size=2, drop_last=True, shuffle=True
    )
    encoded = next(iter(test_dl))
    pixel_values = encoded["pixel_values"][0].to(device)
    with torch.no_grad():
        generated_ids = model.generate(
            pixel_values=pixel_values.unsqueeze(0), max_length=50
        )
    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[
        0
    ]
    show(pixel_values.permute(1, 2, 0).detach().cpu().numpy(), title=generated_caption)
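# The fine-tuned weights are never persisted above; a short sketch of saving
# and reloading them with the standard Hugging Face API. The output directory
# name is an arbitrary choice, not from the original notebook.
save_dir = "./git-flickr8k-finetuned"
model.save_pretrained(save_dir)
processor.save_pretrained(save_dir)

# Later (or in another session), reload both pieces:
from transformers import AutoModelForCausalLM, AutoProcessor

reloaded_model = AutoModelForCausalLM.from_pretrained(save_dir).to(device)
reloaded_processor = AutoProcessor.from_pretrained(save_dir)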
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/321/129321538.ipynb
flickr8k
adityajn105
[{"Id": 129321538, "ScriptId": 37112693, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2294317, "CreationDate": "05/12/2023 18:54:22", "VersionNumber": 1.0, "Title": "image_captioning_pytorch", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 152.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185260018, "KernelVersionId": 129321538, "SourceDatasetVersionId": 1111676}]
[{"Id": 1111676, "DatasetId": 623289, "DatasourceVersionId": 1141936, "CreatorUserId": 1526260, "LicenseName": "CC0: Public Domain", "CreationDate": "04/27/2020 07:27:19", "VersionNumber": 1.0, "Title": "Flickr 8k Dataset", "Slug": "flickr8k", "Subtitle": "Flickr8k Dataset for image captioning.", "Description": "### Context\n\nA new benchmark collection for sentence-based image description and search, consisting of 8,000 images that are each paired with five different captions which provide clear descriptions of the salient entities and events. \u2026 The images were chosen from six different Flickr groups, and tend not to contain any well-known people or locations, but were manually selected to depict a variety of scenes and situations\n\n### Content66\n\nWhat's inside is more than just rows and columns. Make it easy for others to get started by describing how you acquired the data and what time period it represents, too.\n\n\n### Acknowledgements\n\nWe wouldn't be here without the help of others. If you owe any attributions or thanks, include them here along with any citations of past research.\n\n\n### Inspiration\n\nYour data will be in front of the world's largest data science community. What questions do you want to see answered?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 623289, "CreatorUserId": 1526260, "OwnerUserId": 1526260.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1111676.0, "CurrentDatasourceVersionId": 1141936.0, "ForumId": 637462, "Type": 2, "CreationDate": "04/27/2020 07:27:19", "LastActivityDate": "04/27/2020", "TotalViews": 160531, "TotalDownloads": 44586, "TotalVotes": 270, "TotalKernels": 277}]
[{"Id": 1526260, "UserName": "adityajn105", "DisplayName": "adityajn105", "RegisterDate": "01/02/2018", "PerformanceTier": 1}]
false
0
1,732
1
1,885
1,732
129321347
<jupyter_start><jupyter_text>Dogs & Cats Images
Kaggle dataset identifier: dogs-cats-images
<jupyter_script>import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns

plt.rcParams["figure.figsize"] = (20, 8)
from tensorflow.keras.preprocessing.image import (
    ImageDataGenerator,
    load_img,
    img_to_array,
)
from tensorflow.keras.applications import VGG16, ResNet50, InceptionV3, MobileNet

img_size = 150
batch_size = 32

train_datagen = ImageDataGenerator(
    rescale=1 / 255.0,
    rotation_range=30,
    horizontal_flip=True,
    width_shift_range=0.1,
    height_shift_range=0.1,
)
# rescaling is for normalization, basically to shorten the value range
val_datagen = ImageDataGenerator(rescale=1 / 255.0)

train_generator = train_datagen.flow_from_directory(
    "/kaggle/input/dogs-cats-images/dataset/training_set",
    target_size=(img_size, img_size),
    batch_size=batch_size,
    shuffle=True,  # shuffle the images in every iteration
    class_mode="binary",
)
val_generator = val_datagen.flow_from_directory(
    "/kaggle/input/dogs-cats-images/dataset/test_set",
    target_size=(img_size, img_size),
    batch_size=batch_size,
    shuffle=False,
    class_mode="binary",
)

# Visualization of 15 Random Samples from a Batch of 32
labels = ["cat", "dog"]
samples = next(train_generator)
images = samples[0]
target = samples[1]
plt.figure(figsize=(20, 20))
for i in range(15):
    plt.subplot(5, 5, i + 1)
    plt.subplots_adjust(hspace=0.3, wspace=0.3)
    plt.imshow(images[i])
    plt.title(f"Class: {labels[int(target[i])]}")
    plt.axis("off")

from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import (
    Conv2D,
    MaxPooling2D,
    GlobalAveragePooling2D,
    Activation,
    Dropout,
    Flatten,
    Dense,
    Input,
    Layer,
)

model = Sequential()
model.add(
    Conv2D(32, (3, 3), input_shape=(150, 150, 3), activation="relu", padding="same")
)
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(128, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dense(units=512, activation="relu"))
model.add(Dense(units=1, activation="sigmoid"))
model.summary()

model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

filepath = "model_cnn.h5"
checkpoint = ModelCheckpoint(
    filepath,
    monitor="val_loss",
    verbose=1,
    save_best_only=True,
    save_weights_only=False,
)
history = model.fit(
    train_generator, epochs=5, validation_data=val_generator, callbacks=[checkpoint]
)

# LEARNING CURVES
# If the gap between validation loss and training loss is too big, the model is overfitting
plt.figure(figsize=(20, 8))
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model Loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["Train", "Val"], loc="upper left")
plt.show()

plt.figure(figsize=(20, 8))
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model Accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["Train", "Val"], loc="upper left")
plt.show()

model = tf.keras.models.load_model("/kaggle/working/model_cnn.h5")
y_test = val_generator.classes
y_pred = model.predict(val_generator)
y_pred_probs = y_pred.copy()
# Threshold at 0.5 (the original pair of masked assignments left values exactly
# equal to 0.5 unchanged)
y_pred = (y_pred > 0.5).astype(int)
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test, y_pred, target_names=["cats", "dogs"])) plt.figure(figsize=(10, 8)) sns.heatmap( confusion_matrix(y_test, y_pred), annot=True, fmt=".3g", xticklabels=["cats", "dogs"], yticklabels=["cats", "dogs"], cmap="Blues", ) plt.show() from sklearn.metrics import roc_curve, roc_auc_score fpr, tpr, thresholds = roc_curve(y_test, y_pred_probs) roc_auc = roc_auc_score(y_test, y_pred_probs) roc_auc # 0.5 - 1 range plt.plot(fpr, tpr, color="blue", label="ROC curve (AUC = %0.2f)" % roc_auc) plt.plot([0, 1], [0, 1], color="red", linestyle="--", label="Random guessing") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver Operating Characteristic (ROC) curve") plt.legend(loc="lower right") plt.show()
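# The ROC curve above sweeps every threshold, while the hard predictions
# earlier used a fixed 0.5 cutoff. A small sketch of picking a data-driven
# threshold with Youden's J statistic, reusing `fpr`, `tpr`, and `thresholds`
# from the cell above; this is an illustrative addition, not the original flow.
j_scores = tpr - fpr
best_threshold = thresholds[np.argmax(j_scores)]
print("Best threshold by Youden's J:", best_threshold)
# Re-derive hard predictions at the chosen operating point.
y_pred_opt = (y_pred_probs >= best_threshold).astype(int)
print(classification_report(y_test, y_pred_opt, target_names=["cats", "dogs"]))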
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/321/129321347.ipynb
dogs-cats-images
chetankv
[{"Id": 129321347, "ScriptId": 37476098, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6542785, "CreationDate": "05/12/2023 18:51:42", "VersionNumber": 1.0, "Title": "Binary Classification", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 156.0, "LinesInsertedFromPrevious": 156.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185259746, "KernelVersionId": 129321347, "SourceDatasetVersionId": 28903}]
[{"Id": 28903, "DatasetId": 22535, "DatasourceVersionId": 28946, "CreatorUserId": 632316, "LicenseName": "CC0: Public Domain", "CreationDate": "04/19/2018 18:20:08", "VersionNumber": 1.0, "Title": "Dogs & Cats Images", "Slug": "dogs-cats-images", "Subtitle": "image classification", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 227350220.0, "TotalUncompressedBytes": 227350220.0}]
[{"Id": 22535, "CreatorUserId": 632316, "OwnerUserId": 632316.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 28903.0, "CurrentDatasourceVersionId": 28946.0, "ForumId": 30533, "Type": 2, "CreationDate": "04/19/2018 18:20:08", "LastActivityDate": "04/19/2018", "TotalViews": 143541, "TotalDownloads": 35362, "TotalVotes": 566, "TotalKernels": 233}]
[{"Id": 632316, "UserName": "chetankv", "DisplayName": "chetanimravan", "RegisterDate": "06/04/2016", "PerformanceTier": 0}]
false
0
1,486
1
1,512
1,486
129321338
# # **Libraries**
# In Python, libraries are used to extend the core functionality of the language, allowing users to perform a wide range of tasks without having to write the code from scratch. They encapsulate complex operations into simpler, more readable code, and promote code reuse, which leads to more efficient and reliable programming. Moreover, many libraries are optimized for performance, providing faster execution times compared to pure Python code.
# 1. Pandas: This library provides data structures and tools for data manipulation and analysis. It's essential for working with tabular data (e.g., CSV, Excel files) and offers functions for quickly filtering, sorting, and joining data.
# 2. NumPy: This is a fundamental package for scientific computing in Python. It contains functions for working with large, multi-dimensional arrays and matrices, along with a large library of mathematical functions to operate on these arrays.
# 3. Matplotlib: This library is used for creating static, animated, and interactive visualizations in Python. It's a flexible and powerful tool for data visualization.
# 4. Seaborn: This is built on top of matplotlib and makes it easier to create beautiful and informative statistical graphs. It aims to simplify the visualization of complex datasets.
# 5. XGBoost: This library implements the gradient boosting algorithm, which is widely used in machine learning for classification, regression, and a host of other tasks. It's known for its high efficiency and flexibility.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb

# # **Dataset and Manipulations**
# Now, we need a dataset, arguably the most critical part of this whole process. There are many places where you can obtain data. I've chosen a Yahoo Finance dataset for Bitcoin. Keep in mind that this tutorial can be applied to other cryptocurrencies with a similarly structured dataset, although minor adjustments may be necessary for others. (https://finance.yahoo.com/quote/BTC-USD/history/)
# This dataset ranges from September 17, 2014, to May 11, 2023, at daily frequency, with the following fields:
# 1. Open, referring to the opening price.
# 2. High, the highest price for that day.
# 3. Low, the lowest price for that day.
# 4. Close, the closing price.
# 5. Adj Close, which will be deleted as it is not relevant in the case of Bitcoin and is equal to the closing price.
# 6. Volume, representing the total amount (in dollars) of Bitcoin traded during a given period (daily).
# **Loading a Dataset:**
# We need to "activate" the dataset through the pandas library, as can be seen below. Note that the file path is established through Kaggle and may vary depending on different software applications or computer settings. Therefore, this step is individual, and for proper functioning in all scenarios, it's essential to understand how to upload a dataset. Resources like www.youtube.com can be very helpful in this regard. Regarding column deletion, the "del" command does the job.
df = pd.read_csv("/kaggle/input/projekt/BTC-USD.csv")
del df["Adj Close"]
df["Daily_Change"] = (
    (df["Close"] - df["Open"]) / df["Open"]
) * 100  # see chapter Time Series
# # **Data Check:** After loading the dataset into a DataFrame, referred to as "df", it is always good practice to examine the structure and content of the dataset. You can observe that the data rows contain the variables previously explained.
# Additionally, you might want to check the last few rows to verify the completeness and integrity of your data.

# **First Rows**
df.head()

# **Last Rows**
df.tail()

# **Index:** Since a time series is a series of data points indexed in time order, the only logical way to manipulate the data is to set the date ("Date") as the index. We can achieve this with the following commands.
df = df.set_index("Date")
df.index = pd.to_datetime(df.index)
df.head()

# **Graph:** Now is a good time to plot this data, as graphs are our best friends in visualizing patterns. It's not necessary to use a specific plot; these visualizations are largely up to your imagination and preferences, so this code can be adjusted to your liking. For more examples, see https://seaborn.pydata.org/examples/index.html.
sns.set_theme(style="darkgrid")
df.plot(y="Close", color="orange", title="BTC Closing Price")

# # **Model Training**
# Training a model is a crucial phase in the machine learning process. It involves feeding a dataset into an algorithm, which learns from the data patterns and makes predictions or decisions without being specifically programmed to perform the task. This phase requires careful selection of a suitable model and fine-tuning of parameters to optimize performance. Remember, the goal is to create a model that not only fits the training data well but can also generalize effectively to new, unseen data.

# **Splitting:** I am setting the split date to 2022-01-01: data before this date will be used for training the model, and data after it for testing, as can be seen in the graph. The first part of the code splits the dataset into two parts, and while any date can be selected, keep in mind that the division of data into a training set and a test set depends on the specific circumstances of your project. A generally accepted rule in the field of machine learning is to allocate 70-80% of the data for training and the remainder for testing, which is why this particular date has been chosen. The subsequent code divides the data accordingly and visualizes the split.
def plot_time_series(df, mask, change_date):
    fig, ax = plt.subplots()
    ax.plot(df.loc[mask, "Close"], color="orange")
    ax.plot(df.loc[~mask, "Close"], color="black")
    ax.axvline(change_date, color="black", linestyle="--")
    plt.show()


change_date = pd.to_datetime("2022-01-01")
train = df.loc[df.index < change_date]
test = df.loc[df.index >= change_date]
mask = df.index < change_date
plot_time_series(df, mask, change_date)

# # **Features**
# In the realm of predictive analysis using a machine learning model, features within a dataset are distinct quantifiable properties or traits of the observed entities. These features, alternately known as attributes or variables, serve as the input from which the model learns patterns, thereby enabling it to make predictions or decisions. For example, in a time-series forecasting scenario, features may encompass historical values of the variable we aim to predict, elements of date or time, or other external variables that might impact the variable being forecasted. To handle these features, one would generally utilize data manipulation libraries in Python, like pandas in our case, wherein features can be chosen, modified, or created using a range of functions and methods.
# Bear in mind, the process of feature selection and engineering is a pivotal phase in the development of an efficient machine learning model, given that the model's effectiveness is significantly influenced by the quality and pertinence of the chosen features.

# **Time Series:** Given that we are working with time series data, there are several aspects we must consider. A crucial one is the issue of stationarity, or more specifically, non-stationary time series. As the Bitcoin price is significantly non-stationary (statistical testing is beyond the scope of this work), there are steps we need to take into account. Instead of predicting the closing price, I will be predicting daily changes (in %), as tests demonstrate these are stationary and hence suitable for further use.
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt


def perform_adf_test(series):
    result = adfuller(series)
    print(f"ADF test statistic: {result[0]}")
    print(f"p-value: {result[1]}")


def plot_data(df, mask, change_date):
    fig, ax = plt.subplots()
    ax.plot(df.index[mask], df.loc[mask, "Daily_Change"], color="orange")
    ax.plot(df.index[~mask], df.loc[~mask, "Daily_Change"], color="black")
    ax.axvline(change_date, color="black", linestyle="--")
    plt.show()


perform_adf_test(df["Daily_Change"])
plot_data(df, mask, change_date)

# **Specific Features:** In our endeavor to predict Bitcoin prices using machine learning, we're not merely confined to the historical data of Bitcoin itself. Incorporating various external indicators, such as a stock market index, 30-year Treasury bond rates, or the values of stablecoins like USDT, can provide additional context and potentially enhance the model's predictive power. These external factors depict broader economic conditions, which are known to influence cryptocurrency markets. Furthermore, temporal aspects like the specific month or week of the year can also serve as critical features. For instance, some patterns may be associated with certain times of the year or week due to trading behaviors or recurring events, and including these as features in our model can further improve its accuracy. It's a delicate balance of capturing complexity without overfitting, keeping in mind that each added feature contributes to the dimensionality of the model. One more challenge is that not every dataset is available for the same dates, hence not all of these can be utilized with a machine learning model. As ETFs, particularly SPY, show a correlation with Bitcoin price action, I have chosen to sacrifice weekend data in order to enrich the dataset with this information.
from sklearn.impute import SimpleImputer
import numpy as np
import pandas as pd


def create_features(df, df2):
    # Merge the two data sources on their date index
    df = df.merge(df2, left_index=True, right_index=True, how="inner")
    # Preprocessing: fill gaps forward, then backward
    df.ffill(inplace=True)
    df.bfill(inplace=True)
    # Create new date-based features
    df["Year"], df["Month"], df["Day"], df["DayOfWeek"] = (
        df.index.year,
        df.index.month,
        df.index.day,
        df.index.dayofweek,
    )
    df.sort_index(inplace=True)
    df.index = pd.to_datetime(df.index)
    halving_dates = [
        pd.to_datetime("2012-11-28"),
        pd.to_datetime("2016-07-09"),
        pd.to_datetime("2020-05-11"),
        pd.to_datetime("2024-05-06"),
    ]
    df["days_to_halving"] = np.nan
    for i in range(len(halving_dates) - 1):
        mask = (df.index >= halving_dates[i]) & (df.index < halving_dates[i + 1])
        df.loc[mask, "days_to_halving"] = (
            halving_dates[i + 1] - df.loc[mask].index
        ).days
    return df


# The SPY data must be loaded before calling create_features, which expects both frames
df2 = (
    pd.read_csv(
        "/kaggle/input/projekt2/SPY.csv", parse_dates=["Date"], index_col="Date"
    )
    .drop(columns=["Adj Close"])
    .add_prefix("df2_")
)
df2.ffill(inplace=True)
df2.bfill(inplace=True)

df = create_features(df, df2)
print(df.head())

# **Halving:** This is a significant event specific to Bitcoin that is often included as a feature in forecasting models. It occurs approximately every four years and involves cutting the block reward in half, resulting in a reduction in the rate at which new Bitcoins are created. There are several reasons why halving is considered an important feature in Bitcoin forecasting. Firstly, it leads to a reduction in the supply of Bitcoin, which has implications for the balance between supply and demand in the market. Secondly, halving events generate market attention and can influence investor sentiment and behavior. Traders and investors closely follow these events and anticipate their impact on Bitcoin's price dynamics. Additionally, historical price patterns associated with previous halvings can provide insights into how the market has reacted in the past.
df = df.sort_index()
df.index = pd.to_datetime(df.index)
halving_dates = [
    pd.to_datetime("2012-11-28"),
    pd.to_datetime("2016-07-09"),
    pd.to_datetime("2020-05-11"),
    pd.to_datetime("2024-05-06"),
]
df["days_to_halving"] = np.nan
for i in range(len(halving_dates) - 1):
    mask = (df.index >= halving_dates[i]) & (df.index < halving_dates[i + 1])
    df.loc[mask, "days_to_halving"] = (halving_dates[i + 1] - df.loc[mask].index).days
print(df)

# # **Model**
train = create_features(train, df2)  # the function needs the SPY frame as its second argument
test = create_features(test, df2)

# Note: same-day Open and Close fully determine Daily_Change, so in a genuine
# forecasting setup these price columns would have to be lagged.
FEATURES = [
    "Open",
    "High",
    "Low",
    "Close",
    "Volume",
    "df2_Open",
    "df2_High",
    "df2_Low",
    "df2_Close",
    "df2_Volume",
    "Year",
    "Month",
    "Day",
    "DayOfWeek",
    "days_to_halving",
]
TARGET = "Daily_Change"

X_train = train[FEATURES]
y_train = train[TARGET]
X_test = test[FEATURES]
y_test = test[TARGET]

from sklearn.metrics import mean_squared_error

reg = xgb.XGBRegressor(
    base_score=0.5,
    booster="gbtree",
    n_estimators=1000,
    early_stopping_rounds=50,
    objective="reg:squarederror",  # the squared-error objective; "reg:linear" is its deprecated alias
    max_depth=3,
    learning_rate=0.01,
)
reg.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], verbose=100)

# # **Cross Validation**
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import (
    train_test_split,
    TimeSeriesSplit,
    KFold,
    StratifiedKFold,
    GroupKFold,
    StratifiedGroupKFold,
)
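# The cross-validation imports above are not followed by an actual fold loop in this
# version of the notebook. Below is a minimal sketch of how TimeSeriesSplit could be
# applied to the frames defined above; the number of splits and the reuse of the
# regressor's hyperparameters are illustrative assumptions, not choices made here.
tscv = TimeSeriesSplit(n_splits=5)
fold_rmse = []
for fold, (tr_idx, va_idx) in enumerate(tscv.split(X_train)):
    # Chronological folds: the validation window always lies after the training window.
    X_tr, X_va = X_train.iloc[tr_idx], X_train.iloc[va_idx]
    y_tr, y_va = y_train.iloc[tr_idx], y_train.iloc[va_idx]
    cv_model = xgb.XGBRegressor(
        n_estimators=1000,
        early_stopping_rounds=50,
        objective="reg:squarederror",
        max_depth=3,
        learning_rate=0.01,
    )
    cv_model.fit(X_tr, y_tr, eval_set=[(X_va, y_va)], verbose=False)
    rmse = mean_squared_error(y_va, cv_model.predict(X_va)) ** 0.5
    fold_rmse.append(rmse)
    print(f"Fold {fold}: RMSE = {rmse:.3f}")
print(f"Mean RMSE across folds: {np.mean(fold_rmse):.3f}")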
<jupyter_start><jupyter_text>Animals-10
Hello everyone! This is the dataset I have used for my matriculation thesis.
It contains about 28K medium quality animal images belonging to 10 categories: dog, cat, horse, spider, butterfly, chicken, sheep, cow, squirrel, elephant.
I have used it to test different image recognition networks: from homemade CNNs (~80% accuracy) to Google Inception (98%). It could simulate a smart gallery for a researcher (like a biologist).
All the images have been collected from "google images" and have been checked by a human. There is some erroneous data to simulate real conditions (e.g. images taken by users of your app).
The main directory is divided into folders, one for each category. The image count per category varies from 2K to 5K units.
Kaggle dataset identifier: animals10
<jupyter_script>import numpy as np
import pandas as pd
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.preprocessing.image import (
    ImageDataGenerator,
    load_img,
    img_to_array,
)
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, GlobalAveragePooling2D
from tensorflow.keras.applications import VGG16

plt.rcParams["font.size"] = 14
batch_size = 32
img_size = 224
directory = "/kaggle/input/animals10/raw-img"

datagen = ImageDataGenerator(
    rescale=1 / 255.0, zoom_range=0.2, horizontal_flip=True, validation_split=0.15
)
train_generator = datagen.flow_from_directory(
    directory,
    target_size=(img_size, img_size),
    batch_size=batch_size,
    shuffle=True,
    subset="training",
    class_mode="categorical",
)
validation_generator = datagen.flow_from_directory(
    directory,
    target_size=(img_size, img_size),
    batch_size=batch_size,
    shuffle=False,
    subset="validation",
    class_mode="categorical",
)

# train_generator.__dict__
# train_generator.class_indices
[key for key in train_generator.class_indices]
labels = [k for k in train_generator.class_indices]

sample_generate = train_generator.__next__()
images = sample_generate[0]
titles = sample_generate[1]
plt.figure(figsize=(20, 20))
for i in range(15):
    plt.subplot(5, 5, i + 1)
    plt.subplots_adjust(hspace=0.3, wspace=0.3)
    plt.imshow(images[i])
    plt.title(f"Class: {labels[np.argmax(titles[i], axis=0)]}")
    plt.axis("off")

# VGG16
img_size = 224
base_model = VGG16(
    include_top=False,  # whether to include the 3 fully-connected classifier layers at the top of the network
    weights="imagenet",
    input_shape=(img_size, img_size, 3),
)
base_model.summary()

# Freezing the bottom layers (everything except the last four)
base_model.layers
for layer in base_model.layers[:-4]:
    layer.trainable = False
base_model.summary()

# When the learning rate is set low and convergence is slow, increase it;
# reduce the learning rate when convergence hits a plateau.
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

model_name = "model.h5"
checkpoint = ModelCheckpoint(
    model_name, monitor="val_loss", mode="min", save_best_only=True, verbose=1
)
earlystopping = EarlyStopping(
    monitor="val_loss", min_delta=0, patience=5, verbose=1, restore_best_weights=True
)

last_output = base_model.output
x = GlobalAveragePooling2D()(last_output)
x = Dense(512, activation="relu")(x)
outputs = Dense(10, activation="softmax")(x)
model = Model(inputs=base_model.inputs, outputs=outputs)
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)
history = model.fit(
    train_generator,
    epochs=3,
    validation_data=validation_generator,
    callbacks=[checkpoint, earlystopping],
)
# Epoch 00010: val_loss did not improve from 0.22283
# Restoring model weights from the end of the best epoch.
# Epoch 00010: early stopping

plt.figure(figsize=(20, 8))
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model Loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["Train", "Val"], loc="upper left")
plt.show()

plt.figure(figsize=(20, 8))
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model Accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["Train", "Val"], loc="upper left")
plt.show()

from sklearn.metrics import classification_report, confusion_matrix

labels
model = tf.keras.models.load_model("/kaggle/working/model.h5")
y_test = validation_generator.classes
y_pred = model.predict(validation_generator)
y_pred_probs = y_pred.copy()
y_test
y_pred_int = np.argmax(y_pred_probs, axis=1)

print(classification_report(y_test, y_pred_int, target_names=labels))

plt.figure(figsize=(10, 8))
sns.heatmap(
    confusion_matrix(y_test, y_pred_int),
    annot=True,
    fmt=".3g",
    xticklabels=labels,
    yticklabels=labels,
    cmap="Blues",
)
plt.show()

y = confusion_matrix(y_test, y_pred_int)
print(y)
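# The learning-rate comment earlier in this notebook ("reduce the learning rate when
# convergence hits a plateau") describes what Keras's ReduceLROnPlateau callback
# automates. A minimal sketch of wiring it in is shown below; the factor, patience,
# and min_lr values are illustrative assumptions, not settings from this notebook.
from tensorflow.keras.callbacks import ReduceLROnPlateau

reduce_lr = ReduceLROnPlateau(
    monitor="val_loss",  # watch validation loss for a plateau
    factor=0.5,  # halve the learning rate each time it triggers
    patience=2,  # wait 2 epochs without improvement before reducing
    min_lr=1e-6,  # never go below this learning rate
    verbose=1,
)
# It would then be passed alongside the existing callbacks:
# callbacks=[checkpoint, earlystopping, reduce_lr]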
<jupyter_start><jupyter_text>Aeroclub 2023
Kaggle dataset identifier: aeroclub-2023
<jupyter_script>import numpy as np
import pandas as pd
import os

# Categories (from the PDF):
# Оформление (ticket issuing)
# Запрос вариантов (request for options)
# Бронирование (booking)
# Отмена (cancellation)
# Не заявка (not a request)

task_1_train = pd.read_excel("/kaggle/input/aeroclub-2023/1/Задача №1/train_data.xlsx")
task_1_train.head()
task_1_train["title"][7]
task_1_train["text"][13]
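# Before modeling, it helps to check how balanced the five categories above are.
# The label column name below ("category") is a guess, since the dataset schema is
# not shown in this excerpt; print the columns first and substitute the real name.
print(task_1_train.columns.tolist())
print(task_1_train["category"].value_counts())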
from fasteda import fast_eda
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler, MinMaxScaler

train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")


def scale_data(df):
    scaler = StandardScaler()
    float_cols = df.select_dtypes(include=["float64"])
    scaled_data = scaler.fit_transform(df[list(float_cols)])
    df[list(float_cols)] = scaled_data  # write back to the frame being scaled, not the global train
    return df


test

# ### [fasteda](https://github.com/Matt-OP/fasteda) on the train set with target = "Class"
fast_eda(train, target="Class")

top_n_corr_features = 25
corr_matrix = train.corr(numeric_only=True).abs()
corr_pairs = corr_matrix.unstack().sort_values(ascending=False)
# Drop self-correlations (each feature paired with itself)
corr_pairs = corr_pairs[
    corr_pairs.index.get_level_values(0) != corr_pairs.index.get_level_values(1)
]
# Each pair appears twice, as (A, B) and (B, A), so take twice as many rows
top_n_corr_pairs = corr_pairs[: top_n_corr_features * 2]

duplicate_pairs = set()
feature_tuple, correlation = [], []
for pair in top_n_corr_pairs.index:
    if (pair[0], pair[1]) not in duplicate_pairs and (
        pair[1],
        pair[0],
    ) not in duplicate_pairs:
        feature_tuple.append((pair[0], pair[1]))
        correlation.append(round(top_n_corr_pairs[pair], 3))
        duplicate_pairs.add((pair[0], pair[1]))

plt.style.use("dark_background")
plt.figure(figsize=(10, 12))
ax = sns.barplot(
    x=correlation,
    y=[str(feats) for feats in feature_tuple],
    palette=sns.color_palette("Blues_r", n_colors=len(feature_tuple)),
    width=0.7,
    linewidth=1.2,
    edgecolor="#FFFFFF",
)
for container in ax.containers:
    ax.bar_label(container, size=10, padding=5)
plt.title(f"Top {top_n_corr_features} feature pairs with highest Pearson correlation")
plt.xlim(0, 1.05)
plt.xlabel("Pearson correlation")
plt.ylabel("Feature pairs")
plt.grid(False)
plt.show()

unique_counts = [np.unique(train[col]).size for col in train.columns[1:-1]]
name_count_pairs = [
    (col, unique_counts[i]) for i, col in enumerate(train.columns[1:-1])
]
sorted_pairs = sorted(name_count_pairs, key=lambda x: x[1], reverse=True)

plt.style.use("dark_background")
plt.figure(figsize=(10, 14))
ax = sns.barplot(
    x=[pair[1] for pair in sorted_pairs],
    y=[pair[0] for pair in sorted_pairs],
    palette=sns.color_palette("Reds_r", n_colors=len(sorted_pairs)),
    width=0.6,
    linewidth=1,
    edgecolor="#FFFFFF",
)
for container in ax.containers:
    ax.bar_label(container, size=10, padding=5)
plt.title(f"Unique counts for each feature | n rows in train = {len(train)}")
plt.xlabel("Number of unique samples")
plt.ylabel("Features")
plt.grid(False)
plt.show()

colors = sns.color_palette("viridis")
for i, col in enumerate(greeks.columns[1:-1]):
    plt.figure(figsize=(8, 6))
    ax = (
        greeks[col]
        .value_counts(ascending=True)
        .plot.barh(color=colors[i], edgecolor="#FFFFFF")
    )
    for container in ax.containers:
        ax.bar_label(container, size=8, padding=3)
    plt.title(f"Value counts of {col} in greeks.csv")
    plt.grid(False)
    plt.show()

# Flag the origin of each row so train and test can be compared directly
train["is_train"] = 1
test["is_train"] = 0
train_test = pd.concat([train, test], ignore_index=True)
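# The is_train flag and concatenation above are the usual setup for adversarial
# validation, but this version of the notebook stops before the classifier step.
# A minimal sketch of how it typically continues follows; the model choice and
# cross-validation scheme are assumptions, not part of the original analysis.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

# The target must not leak into the adversarial model, and only numeric features are used.
adv_X = (
    train_test.drop(columns=["Class"], errors="ignore")
    .select_dtypes(include=["float64"])
    .fillna(0)
)
adv_y = train_test["is_train"]

# AUC near 0.5 means the classifier cannot tell train from test, i.e. similar distributions.
adv_clf = RandomForestClassifier(n_estimators=200, random_state=0)
auc_scores = cross_val_score(adv_clf, adv_X, adv_y, cv=5, scoring="roc_auc")
print(f"Adversarial validation AUC: {auc_scores.mean():.3f} (+/- {auc_scores.std():.3f})")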
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# Load the CSV file into a DataFrame
df = pd.read_csv("/kaggle/input/results/results.csv")

# Define metrics where a higher value is better (default is lower is better)
higher_is_better = ["R2"]

# Initialize an empty DataFrame to store ranks
rank_df = pd.DataFrame()

# Iterate over each column, rank it, and add the ranks to rank_df
for column in df.columns[1:]:  # Skip the 'Model' column
    if any(metric in column for metric in higher_is_better):
        rank_df[column] = df[column].rank(ascending=False)  # Higher is better
    else:
        rank_df[column] = df[column].rank(ascending=True)  # Lower is better

# Add 'Model' column to rank_df
rank_df.insert(0, "Model", df["Model"])

# Calculate the average rank for each model across all metrics
rank_df["Average Rank"] = rank_df.iloc[:, 1:].mean(axis=1)

# Sort by 'Average Rank'
rank_df = rank_df.sort_values("Average Rank")

# Display the final rankings
final_rankings = rank_df[["Model", "Average Rank"]]
print(final_rankings)

import matplotlib.pyplot as plt
import seaborn as sns

# Create the plot
plt.figure(figsize=(10, 8))
sns.barplot(
    x=final_rankings["Average Rank"], y=final_rankings["Model"], palette="viridis"
)

# Add labels and title
plt.xlabel("Average Rank")
plt.ylabel("Model")
plt.title("Average Rankings of NLP Models")
plt.show()
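# To make the rank-then-average scheme concrete, here is a tiny self-contained example
# with fabricated numbers; the metric names are illustrative and not from results.csv.
toy = pd.DataFrame(
    {
        "Model": ["A", "B", "C"],
        "RMSE": [0.30, 0.25, 0.40],  # lower is better -> rank ascending
        "R2": [0.85, 0.90, 0.70],  # higher is better -> rank descending
    }
)
toy_ranks = pd.DataFrame({"Model": toy["Model"]})
toy_ranks["RMSE"] = toy["RMSE"].rank(ascending=True)
toy_ranks["R2"] = toy["R2"].rank(ascending=False)
toy_ranks["Average Rank"] = toy_ranks[["RMSE", "R2"]].mean(axis=1)
print(toy_ranks.sort_values("Average Rank"))  # Model B wins both metrics -> average rank 1.0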
# # Blueberry yield
# Importing Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import plotly.express as px

train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv")
train.head(15)
train = train.drop(columns=["id"])
train.describe()
train.info()

sns.heatmap(train.iloc[:, :].corr())

# Drop the highly correlated temperature-range and rain columns
final_train = train.drop(
    columns=[
        "RainingDays",
        "MaxOfUpperTRange",
        "MinOfUpperTRange",
        "MaxOfLowerTRange",
        "MinOfLowerTRange",
    ]
)
final_train

x = final_train.iloc[:, :11]
y = final_train["yield"]

model = tf.keras.Sequential([layers.Dense(11), layers.Dense(6), layers.Dense(1)])
model.compile(optimizer="adam", loss="msle", metrics=["msle"])
train_model = model.fit(x, y, batch_size=64, epochs=100, verbose=0)

history_df = pd.DataFrame(train_model.history)
history_df.loc[:, ["msle"]].plot()

test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
test
fin_test = test.drop(
    columns=[
        "id",
        "RainingDays",
        "MaxOfUpperTRange",
        "MinOfUpperTRange",
        "MaxOfLowerTRange",
        "MinOfLowerTRange",
    ]
)
fin_test.info()

pred = model.predict(fin_test)
pred

from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

# Note: this split happens after the model was already fit on all of x,
# so the R^2 below is measured on data the model has seen during training.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
val = model.predict(x_test)
r2_score(y_test, val)  # r2_score expects (y_true, y_pred) in that order

predf = pd.DataFrame(pred, index=test["id"], columns=["yield"])  # name the prediction column for the submission
predf
predf.to_csv("submission.csv")
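# Because the R^2 above is computed on rows the network already saw during training,
# a leak-free variant would split first and fit only on the training portion.
# A minimal sketch under that assumption, reusing the same architecture:
x_tr, x_val, y_tr, y_val = train_test_split(x, y, test_size=0.2, random_state=42)

eval_model = tf.keras.Sequential([layers.Dense(11), layers.Dense(6), layers.Dense(1)])
eval_model.compile(optimizer="adam", loss="msle", metrics=["msle"])
eval_model.fit(x_tr, y_tr, batch_size=64, epochs=100, verbose=0)

val_pred = eval_model.predict(x_val)
print("Hold-out R^2:", r2_score(y_val, val_pred))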
<jupyter_start><jupyter_text>Heart Failure Prediction
# About this dataset
> Cardiovascular diseases (CVDs) are the **number 1 cause of death globally**, taking an estimated **17.9 million lives each year**, which accounts for **31% of all deaths worldwide**. Heart failure is a common event caused by CVDs, and this dataset contains 12 features that can be used to predict mortality by heart failure.
> Most cardiovascular diseases can be prevented by addressing behavioural risk factors such as tobacco use, unhealthy diet and obesity, physical inactivity, and harmful use of alcohol using population-wide strategies.
> People with cardiovascular disease or who are at high cardiovascular risk (due to the presence of one or more risk factors such as hypertension, diabetes, hyperlipidaemia, or already established disease) need **early detection** and management, wherein a machine learning model can be of great help.

# How to use this dataset
> - Create a model for predicting mortality caused by Heart Failure.
> - Your kernel can be featured here!
> - [More datasets](https://www.kaggle.com/andrewmvd/datasets)

# Acknowledgements
If you use this dataset in your research, please credit the authors.
> ### Citation
> Davide Chicco, Giuseppe Jurman: Machine learning can predict survival of patients with heart failure from serum creatinine and ejection fraction alone. BMC Medical Informatics and Decision Making 20, 16 (2020). ([link](https://doi.org/10.1186/s12911-020-1023-5))
> ### License
> CC BY 4.0
> ### Splash icon
> Icon by [Freepik](https://www.flaticon.com/authors/freepik), available on [Flaticon](https://www.flaticon.com/free-icon/heart_1186541).
> ### Splash banner
> Wallpaper by [jcomp](https://br.freepik.com/jcomp), available on [Freepik](https://br.freepik.com/fotos-gratis/simplesmente-design-minimalista-com-estetoscopio-de-equipamento-de-medicina-ou-phonendoscope_5018002.htm#page=1&query=cardiology&position=3).
Kaggle dataset identifier: heart-failure-clinical-data
<jupyter_code>import pandas as pd

df = pd.read_csv('heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 299 entries, 0 to 298
Data columns (total 13 columns):
 #   Column                    Non-Null Count  Dtype
---  ------                    --------------  -----
 0   age                       299 non-null    float64
 1   anaemia                   299 non-null    int64
 2   creatinine_phosphokinase  299 non-null    int64
 3   diabetes                  299 non-null    int64
 4   ejection_fraction         299 non-null    int64
 5   high_blood_pressure       299 non-null    int64
 6   platelets                 299 non-null    float64
 7   serum_creatinine          299 non-null    float64
 8   serum_sodium              299 non-null    int64
 9   sex                       299 non-null    int64
 10  smoking                   299 non-null    int64
 11  time                      299 non-null    int64
 12  DEATH_EVENT               299 non-null    int64
dtypes: float64(3), int64(10)
memory usage: 30.5 KB
<jupyter_text>Examples:
{"age": 75.0, "anaemia": 0.0, "creatinine_phosphokinase": 582.0, "diabetes": 0.0, "ejection_fraction": 20.0, "high_blood_pressure": 1.0, "platelets": 265000.0, "serum_creatinine": 1.9, "serum_sodium": 130.0, "sex": 1.0, "smoking": 0.0, "time": 4.0, "DEATH_EVENT": 1.0}
{"age": 55.0, "anaemia": 0.0, "creatinine_phosphokinase": 7861.0, "diabetes": 0.0, "ejection_fraction": 38.0, "high_blood_pressure": 0.0, "platelets": 263358.03, "serum_creatinine": 1.1, "serum_sodium": 136.0, "sex": 1.0, "smoking": 0.0, "time": 6.0, "DEATH_EVENT": 1.0}
{"age": 65.0, "anaemia": 0.0, "creatinine_phosphokinase": 146.0, "diabetes": 0.0, "ejection_fraction": 20.0, "high_blood_pressure": 0.0, "platelets": 162000.0, "serum_creatinine": 1.3, "serum_sodium": 129.0, "sex": 1.0, "smoking": 1.0, "time": 7.0, "DEATH_EVENT": 1.0}
{"age": 50.0, "anaemia": 1.0, "creatinine_phosphokinase": 111.0, "diabetes": 0.0, "ejection_fraction": 20.0, "high_blood_pressure": 0.0, "platelets": 210000.0, "serum_creatinine": 1.9, "serum_sodium": 137.0, "sex": 1.0, "smoking": 0.0, "time": 7.0, "DEATH_EVENT": 1.0}
<jupyter_script>import numpy as np
import pandas as pd

df = pd.read_csv(
    "/kaggle/input/heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv"
)
df

dataset = df.values
dataset
x = dataset[:, 0:12]  # the first 12 columns are the clinical features
y = dataset[:, 12]  # DEATH_EVENT, the last column, is the target

from sklearn import preprocessing

sc = preprocessing.MinMaxScaler()
x_scale = sc.fit_transform(x)

from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(
    x_scale, y, test_size=0.3, random_state=42
)
# print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)

from keras.models import Sequential
from keras.layers import Dense
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Dropout

model = Sequential()
model.add(Dense(32, activation="relu", input_shape=(12,)))
model.add(Dropout(0.2))
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()

hist = model.fit(x_train, y_train, batch_size=256, epochs=30, verbose=1)

_, accuracy = model.evaluate(x_test, y_test)
print("Accuracy: %.2f" % (accuracy * 100))

# # **SVM**
from sklearn import svm
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=100
)
clf = svm.SVC()
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)

# # **Random Forest**
from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=40) clf = RandomForestClassifier(n_estimators=1000, random_state=1) clf.fit(x_train, y_train) y_pred = clf.predict(x_test) accuracy = accuracy_score(y_test, y_pred) print("Accuracy:", accuracy * 100) # # NAIVE BAYES from sklearn.naive_bayes import GaussianNB from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.2, random_state=40 ) clf = GaussianNB() clf.fit(x_train, y_train) y_pred = clf.predict(x_test) accuracy = accuracy_score(y_test, y_pred) print("Accuracy:", accuracy * 100) # # DECISION TREE from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.2, random_state=40 ) clf = DecisionTreeClassifier() clf.fit(x_train, y_train) y_pred = clf.predict(x_test) accuracy = accuracy_score(y_test, y_pred) print("Accuracy:", accuracy * 100) # # KNN from sklearn.neighbors import KNeighborsClassifier clf = KNeighborsClassifier(n_neighbors=5) clf.fit(x_train, y_train) y_pred = clf.predict(x_test) accuracy = accuracy_score(y_test, y_pred) print("Accuracy:", accuracy * 100)
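The five classifiers above are scored on different train/test splits (random_state 42, 100, and 40) and only the Keras network sees the min-max-scaled features, so the printed accuracies are not directly comparable. A minimal sketch of a consistent comparison, assuming the x_scale and y arrays built earlier in the script; the stratified 5-fold setup here is an illustrative choice, not part of the original notebook:

# Sketch: score every classifier on identical stratified 5-fold splits of the
# scaled features so the accuracies are comparable (assumes x_scale and y exist).
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier

y_int = y.astype(int)  # labels come out of df.values as floats; use explicit classes
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
models = {
    "SVM": SVC(),
    "Random Forest": RandomForestClassifier(n_estimators=1000, random_state=1),
    "Naive Bayes": GaussianNB(),
    "Decision Tree": DecisionTreeClassifier(random_state=1),
    "KNN": KNeighborsClassifier(n_neighbors=5),
}
for name, clf in models.items():
    scores = cross_val_score(clf, x_scale, y_int, cv=cv, scoring="accuracy")
    print("%s: %.2f%% (+/- %.2f)" % (name, scores.mean() * 100, scores.std() * 100))

Cross-validation matters here because the dataset has only 299 rows, so a single 20% hold-out leaves about 60 test cases and the reported accuracy swings noticeably with the split seed.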
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/661/129661309.ipynb
heart-failure-clinical-data
andrewmvd
[{"Id": 129661309, "ScriptId": 38521655, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12850841, "CreationDate": "05/15/2023 14:53:33", "VersionNumber": 1.0, "Title": "heart failure prediction ann", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 118.0, "LinesInsertedFromPrevious": 118.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185965323, "KernelVersionId": 129661309, "SourceDatasetVersionId": 1263738}]
[{"Id": 1263738, "DatasetId": 727551, "DatasourceVersionId": 1295676, "CreatorUserId": 793761, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "06/20/2020 01:03:20", "VersionNumber": 1.0, "Title": "Heart Failure Prediction", "Slug": "heart-failure-clinical-data", "Subtitle": "12 clinical features por predicting death events.", "Description": "# About this dataset\n&gt; Cardiovascular diseases (CVDs) are the **number 1 cause of death globally**, taking an estimated **17.9 million lives each year**, which accounts for **31% of all deaths worlwide**.\nHeart failure is a common event caused by CVDs and this dataset contains 12 features that can be used to predict mortality by heart failure.\n\n&gt; Most cardiovascular diseases can be prevented by addressing behavioural risk factors such as tobacco use, unhealthy diet and obesity, physical inactivity and harmful use of alcohol using population-wide strategies.\n\n&gt; People with cardiovascular disease or who are at high cardiovascular risk (due to the presence of one or more risk factors such as hypertension, diabetes, hyperlipidaemia or already established disease) need **early detection** and management wherein a machine learning model can be of great help.\n\n# How to use this dataset\n&gt; - Create a model for predicting mortality caused by Heart Failure.\n- Your kernel can be featured here!\n- [More datasets](https://www.kaggle.com/andrewmvd/datasets)\n\n\n\n# Acknowledgements\nIf you use this dataset in your research, please credit the authors\n&gt; ### Citation\nDavide Chicco, Giuseppe Jurman: Machine learning can predict survival of patients with heart failure from serum creatinine and ejection fraction alone. BMC Medical Informatics and Decision Making 20, 16 (2020). ([link](https://doi.org/10.1186/s12911-020-1023-5))\n\n&gt; ### License\nCC BY 4.0\n\n&gt; ### Splash icon\nIcon by [Freepik](https://www.flaticon.com/authors/freepik), available on [Flaticon](https://www.flaticon.com/free-icon/heart_1186541).\n\n&gt; ### Splash banner\nWallpaper by [jcomp](https://br.freepik.com/jcomp), available on [Freepik](https://br.freepik.com/fotos-gratis/simplesmente-design-minimalista-com-estetoscopio-de-equipamento-de-medicina-ou-phonendoscope_5018002.htm#page=1&query=cardiology&position=3).", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 727551, "CreatorUserId": 793761, "OwnerUserId": 793761.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1263738.0, "CurrentDatasourceVersionId": 1295676.0, "ForumId": 742394, "Type": 2, "CreationDate": "06/20/2020 01:03:20", "LastActivityDate": "06/20/2020", "TotalViews": 882099, "TotalDownloads": 116977, "TotalVotes": 2090, "TotalKernels": 920}]
[{"Id": 793761, "UserName": "andrewmvd", "DisplayName": "Larxel", "RegisterDate": "11/15/2016", "PerformanceTier": 4}]
[{"heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv": {"column_names": "[\"age\", \"anaemia\", \"creatinine_phosphokinase\", \"diabetes\", \"ejection_fraction\", \"high_blood_pressure\", \"platelets\", \"serum_creatinine\", \"serum_sodium\", \"sex\", \"smoking\", \"time\", \"DEATH_EVENT\"]", "column_data_types": "{\"age\": \"float64\", \"anaemia\": \"int64\", \"creatinine_phosphokinase\": \"int64\", \"diabetes\": \"int64\", \"ejection_fraction\": \"int64\", \"high_blood_pressure\": \"int64\", \"platelets\": \"float64\", \"serum_creatinine\": \"float64\", \"serum_sodium\": \"int64\", \"sex\": \"int64\", \"smoking\": \"int64\", \"time\": \"int64\", \"DEATH_EVENT\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 299 entries, 0 to 298\nData columns (total 13 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 age 299 non-null float64\n 1 anaemia 299 non-null int64 \n 2 creatinine_phosphokinase 299 non-null int64 \n 3 diabetes 299 non-null int64 \n 4 ejection_fraction 299 non-null int64 \n 5 high_blood_pressure 299 non-null int64 \n 6 platelets 299 non-null float64\n 7 serum_creatinine 299 non-null float64\n 8 serum_sodium 299 non-null int64 \n 9 sex 299 non-null int64 \n 10 smoking 299 non-null int64 \n 11 time 299 non-null int64 \n 12 DEATH_EVENT 299 non-null int64 \ndtypes: float64(3), int64(10)\nmemory usage: 30.5 KB\n", "summary": "{\"age\": {\"count\": 299.0, \"mean\": 60.83389297658862, \"std\": 11.89480907404447, \"min\": 40.0, \"25%\": 51.0, \"50%\": 60.0, \"75%\": 70.0, \"max\": 95.0}, \"anaemia\": {\"count\": 299.0, \"mean\": 0.431438127090301, \"std\": 0.4961072681330793, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"creatinine_phosphokinase\": {\"count\": 299.0, \"mean\": 581.8394648829432, \"std\": 970.2878807124362, \"min\": 23.0, \"25%\": 116.5, \"50%\": 250.0, \"75%\": 582.0, \"max\": 7861.0}, \"diabetes\": {\"count\": 299.0, \"mean\": 0.4180602006688963, \"std\": 0.49406706510360904, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"ejection_fraction\": {\"count\": 299.0, \"mean\": 38.08361204013378, \"std\": 11.834840741039171, \"min\": 14.0, \"25%\": 30.0, \"50%\": 38.0, \"75%\": 45.0, \"max\": 80.0}, \"high_blood_pressure\": {\"count\": 299.0, \"mean\": 0.3511705685618729, \"std\": 0.47813637906274475, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"platelets\": {\"count\": 299.0, \"mean\": 263358.02926421404, \"std\": 97804.2368685983, \"min\": 25100.0, \"25%\": 212500.0, \"50%\": 262000.0, \"75%\": 303500.0, \"max\": 850000.0}, \"serum_creatinine\": {\"count\": 299.0, \"mean\": 1.3938795986622072, \"std\": 1.0345100640898541, \"min\": 0.5, \"25%\": 0.9, \"50%\": 1.1, \"75%\": 1.4, \"max\": 9.4}, \"serum_sodium\": {\"count\": 299.0, \"mean\": 136.62541806020067, \"std\": 4.412477283909235, \"min\": 113.0, \"25%\": 134.0, \"50%\": 137.0, \"75%\": 140.0, \"max\": 148.0}, \"sex\": {\"count\": 299.0, \"mean\": 0.6488294314381271, \"std\": 0.47813637906274475, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"smoking\": {\"count\": 299.0, \"mean\": 0.3210702341137124, \"std\": 0.46767042805677167, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"time\": {\"count\": 299.0, \"mean\": 130.2608695652174, \"std\": 77.61420795029339, \"min\": 4.0, \"25%\": 73.0, \"50%\": 115.0, \"75%\": 203.0, \"max\": 285.0}, \"DEATH_EVENT\": {\"count\": 299.0, \"mean\": 0.3210702341137124, \"std\": 
0.46767042805677167, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": "{\"age\":{\"0\":75.0,\"1\":55.0,\"2\":65.0,\"3\":50.0},\"anaemia\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"creatinine_phosphokinase\":{\"0\":582,\"1\":7861,\"2\":146,\"3\":111},\"diabetes\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"ejection_fraction\":{\"0\":20,\"1\":38,\"2\":20,\"3\":20},\"high_blood_pressure\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"platelets\":{\"0\":265000.0,\"1\":263358.03,\"2\":162000.0,\"3\":210000.0},\"serum_creatinine\":{\"0\":1.9,\"1\":1.1,\"2\":1.3,\"3\":1.9},\"serum_sodium\":{\"0\":130,\"1\":136,\"2\":129,\"3\":137},\"sex\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\"smoking\":{\"0\":0,\"1\":0,\"2\":1,\"3\":0},\"time\":{\"0\":4,\"1\":6,\"2\":7,\"3\":7},\"DEATH_EVENT\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1}}"}}]
true
1
<start_data_description><data_path>heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv: <column_names> ['age', 'anaemia', 'creatinine_phosphokinase', 'diabetes', 'ejection_fraction', 'high_blood_pressure', 'platelets', 'serum_creatinine', 'serum_sodium', 'sex', 'smoking', 'time', 'DEATH_EVENT'] <column_types> {'age': 'float64', 'anaemia': 'int64', 'creatinine_phosphokinase': 'int64', 'diabetes': 'int64', 'ejection_fraction': 'int64', 'high_blood_pressure': 'int64', 'platelets': 'float64', 'serum_creatinine': 'float64', 'serum_sodium': 'int64', 'sex': 'int64', 'smoking': 'int64', 'time': 'int64', 'DEATH_EVENT': 'int64'} <dataframe_Summary> {'age': {'count': 299.0, 'mean': 60.83389297658862, 'std': 11.89480907404447, 'min': 40.0, '25%': 51.0, '50%': 60.0, '75%': 70.0, 'max': 95.0}, 'anaemia': {'count': 299.0, 'mean': 0.431438127090301, 'std': 0.4961072681330793, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'creatinine_phosphokinase': {'count': 299.0, 'mean': 581.8394648829432, 'std': 970.2878807124362, 'min': 23.0, '25%': 116.5, '50%': 250.0, '75%': 582.0, 'max': 7861.0}, 'diabetes': {'count': 299.0, 'mean': 0.4180602006688963, 'std': 0.49406706510360904, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'ejection_fraction': {'count': 299.0, 'mean': 38.08361204013378, 'std': 11.834840741039171, 'min': 14.0, '25%': 30.0, '50%': 38.0, '75%': 45.0, 'max': 80.0}, 'high_blood_pressure': {'count': 299.0, 'mean': 0.3511705685618729, 'std': 0.47813637906274475, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'platelets': {'count': 299.0, 'mean': 263358.02926421404, 'std': 97804.2368685983, 'min': 25100.0, '25%': 212500.0, '50%': 262000.0, '75%': 303500.0, 'max': 850000.0}, 'serum_creatinine': {'count': 299.0, 'mean': 1.3938795986622072, 'std': 1.0345100640898541, 'min': 0.5, '25%': 0.9, '50%': 1.1, '75%': 1.4, 'max': 9.4}, 'serum_sodium': {'count': 299.0, 'mean': 136.62541806020067, 'std': 4.412477283909235, 'min': 113.0, '25%': 134.0, '50%': 137.0, '75%': 140.0, 'max': 148.0}, 'sex': {'count': 299.0, 'mean': 0.6488294314381271, 'std': 0.47813637906274475, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'smoking': {'count': 299.0, 'mean': 0.3210702341137124, 'std': 0.46767042805677167, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'time': {'count': 299.0, 'mean': 130.2608695652174, 'std': 77.61420795029339, 'min': 4.0, '25%': 73.0, '50%': 115.0, '75%': 203.0, 'max': 285.0}, 'DEATH_EVENT': {'count': 299.0, 'mean': 0.3210702341137124, 'std': 0.46767042805677167, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}} <dataframe_info> RangeIndex: 299 entries, 0 to 298 Data columns (total 13 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 age 299 non-null float64 1 anaemia 299 non-null int64 2 creatinine_phosphokinase 299 non-null int64 3 diabetes 299 non-null int64 4 ejection_fraction 299 non-null int64 5 high_blood_pressure 299 non-null int64 6 platelets 299 non-null float64 7 serum_creatinine 299 non-null float64 8 serum_sodium 299 non-null int64 9 sex 299 non-null int64 10 smoking 299 non-null int64 11 time 299 non-null int64 12 DEATH_EVENT 299 non-null int64 dtypes: float64(3), int64(10) memory usage: 30.5 KB <some_examples> {'age': {'0': 75.0, '1': 55.0, '2': 65.0, '3': 50.0}, 'anaemia': {'0': 0, '1': 0, '2': 0, '3': 1}, 'creatinine_phosphokinase': {'0': 582, '1': 7861, '2': 146, '3': 111}, 'diabetes': {'0': 0, '1': 0, '2': 0, '3': 0}, 'ejection_fraction': {'0': 20, '1': 38, '2': 
20, '3': 20}, 'high_blood_pressure': {'0': 1, '1': 0, '2': 0, '3': 0}, 'platelets': {'0': 265000.0, '1': 263358.03, '2': 162000.0, '3': 210000.0}, 'serum_creatinine': {'0': 1.9, '1': 1.1, '2': 1.3, '3': 1.9}, 'serum_sodium': {'0': 130, '1': 136, '2': 129, '3': 137}, 'sex': {'0': 1, '1': 1, '2': 1, '3': 1}, 'smoking': {'0': 0, '1': 0, '2': 1, '3': 0}, 'time': {'0': 4, '1': 6, '2': 7, '3': 7}, 'DEATH_EVENT': {'0': 1, '1': 1, '2': 1, '3': 1}} <end_description>
1,063
0
2,699
1,063
129661007
<jupyter_start><jupyter_text>Marijuana Arrests in Toronto: Racial Disparities
```
Data on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. A data frame with 5226 observations on the following 8 variables.
```
| Column | Description |
| --- | --- |
| released | Whether or not the arrestee was released with a summons; a factor with levels: No; Yes. |
| colour | The arrestee's race; a factor with levels: Black; White. |
| year | 1997 through 2002; a numeric vector. |
| age | in years; a numeric vector. |
| sex | a factor with levels: Female; Male. |
| employed | a factor with levels: No; Yes. |
| citizen | a factor with levels: No; Yes. |
| checks | Number of police data bases (of previous arrests, previous convictions, parole status, etc. – 6 in all) on which the arrestee's name appeared; a numeric vector |

# Source
Personal communication from Michael Friendly, York University.

Kaggle dataset identifier: arrests-for-marijuana-possession <jupyter_script>import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px df = pd.read_csv("/kaggle/input/arrests-for-marijuana-possession/Arrests.csv") df.head(5) df = df.iloc[:, 1:] # # Age feature is positively skewed # Older people appear less often among those arrested sns.histplot(df, x="age", kde=True, color="g") def plots(df, x, y): f, ax = plt.subplots(1, 3, figsize=(25, 10)) Group_data = df.groupby(y) sns.histplot(df, x=x, hue=y, ax=ax[0], kde=True) sns.barplot( x=Group_data[x].mean().index, y=Group_data[x].mean().values, ax=ax[1], palette="mako", ) for container in ax[1].containers: ax[1].bar_label(container, color="black", size=20) palette_color = sns.color_palette("summer") plt.pie( x=df[y].value_counts(), labels=df[y].value_counts().index, autopct="%.0f%%", shadow=True, colors=palette_color, ) plt.suptitle( "{} histogram and barplots grouped by {}\n{} pie chart".format( x, y, y ).capitalize() ) ax[0].set_title("Data distribution of {} grouped by {}".format(x, y)) ax[1].set_title( "Bar plots, showing mean values for {} for each category of {}".format(x, y) ) ax[2].set_title( "Pie chart showing ratio between categories for {} feature".format(y) ) plt.show() # # Histograms, barplots and pie charts # Histograms - for data distribution for each category # Bar plots - mean value of age for each category # Pie charts - ratio between each of categorical values for i in ["released", "colour", "year", "employed", "citizen", "checks"]: plots(df, "age", i) # # Mean age of individuals arrested for the possession of marijuana each year grouped = df.groupby("year") mean_age = grouped["age"].mean() years = mean_age.index fig = px.line(x=years, y=mean_age, title="Year vs Age") fig.update_layout( xaxis_title="Year", yaxis_title="Mean age in years" ) fig.show() # # Mean age of people arrested for marijuana for each number of checks grouped = df.groupby("checks") mean_age = grouped["age"].mean() checks = mean_age.index fig = px.line(x=checks, y=mean_age, title="Checks vs Age") fig.update_layout( xaxis_title="Number of checks", yaxis_title="Mean age in years" ) fig.show()
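The notebook explores age only, while the dataset's stated subject is racial disparity in release decisions. A short follow-up sketch, assuming the df loaded above and the released, colour and checks columns described in the table; the crosstab framing is an illustrative addition, not part of the original analysis:

# Sketch: share of arrestees released with a summons, by race, plus the mean
# number of police database checks as a possible confounder (assumes df above).
import pandas as pd

release_by_race = pd.crosstab(df["colour"], df["released"], normalize="index")
print(release_by_race)  # each row sums to 1: fraction No / Yes within each race
print(df.groupby("colour")["checks"].mean())  # prior-record exposure per race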
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/661/129661007.ipynb
arrests-for-marijuana-possession
utkarshx27
[{"Id": 129661007, "ScriptId": 38316314, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11036701, "CreationDate": "05/15/2023 14:51:07", "VersionNumber": 1.0, "Title": "Marijuana Arrests EDA", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 66.0, "LinesInsertedFromPrevious": 66.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 185964771, "KernelVersionId": 129661007, "SourceDatasetVersionId": 5631796}]
[{"Id": 5631796, "DatasetId": 3238325, "DatasourceVersionId": 5707058, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/08/2023 10:17:21", "VersionNumber": 1.0, "Title": "Marijuana Arrests in Toronto: Racial Disparities", "Slug": "arrests-for-marijuana-possession", "Subtitle": "Marijuana Arrests in Toronto: Race, Release, and Policing (1997-2002)", "Description": "``` \nData on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. A data frame with 5226 observations on the following 8 variables.\n```\n| Column | Description |\n| --- | --- |\n| released | Whether or not the arrestee was released with a summons; a factor with levels: No; Yes.\n |\n| colour | The arrestee's race; a factor with levels: Black; White. |\n| year | 1997 through 2002; a numeric vector. |\n| age | in years; a numeric vector. |\n| sex | a factor with levels: Female; Male. |\n| employed | a factor with levels: No; Yes. |\n| citizen | a factor with levels: No; Yes. |\n| checks | Number of police data bases (of previous arrests, previous convictions, parole status, etc. \u2013 6 in all) on which the arrestee's name appeared; a numeric vector |\n\n# Source\nPersonal communication from Michael Friendly, York University.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3238325, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5631796.0, "CurrentDatasourceVersionId": 5707058.0, "ForumId": 3303517, "Type": 2, "CreationDate": "05/08/2023 10:17:21", "LastActivityDate": "05/08/2023", "TotalViews": 8788, "TotalDownloads": 1614, "TotalVotes": 49, "TotalKernels": 14}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
false
1
768
2
1,092
768
129782661
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session from darts.datasets import AirPassengersDataset, MonthlyMilkDataset import pandas as pd MonthlyMilkDataset().load().pd_dataframe() AirPassengersDataset().load().pd_series() AirPassengersDataset().load().pd_dataframe() import matplotlib.pyplot as plt series_ar = AirPassengersDataset().load() series_milk = MonthlyMilkDataset().load() series_ar.plot(label="no of passengers") series_milk.plot(label="monthly milk production") from darts.dataprocessing.transformers import Scaler scaler_air, scaler_milk = Scaler(), Scaler() series_air_scaled = scaler_air.fit_transform(series_ar) series_milk_scaled = scaler_milk.fit_transform(series_milk) series_air_scaled.plot(label="air") series_milk_scaled.plot(label="milk") train_air, val_air = series_air_scaled[:-36], series_air_scaled[-36:] train_milk, val_milk = series_milk_scaled[:-36], series_milk_scaled[-36:] from darts import TimeSeries from darts.utils.timeseries_generation import ( gaussian_timeseries, linear_timeseries, sine_timeseries, ) from darts.models import ( RNNModel, TCNModel, TransformerModel, NBEATSModel, BlockRNNModel, ) from darts.metrics import mape, smape model_air_milk = NBEATSModel( input_chunk_length=24, output_chunk_length=12, n_epochs=100, random_state=0 ) model_air_milk.fit([train_air, train_milk], verbose=True) pred = model_air_milk.predict(n=36, series=train_air) series_air_scaled.plot(label="actual") pred.plot(label="forecast") plt.legend() print("MAPE = {:.2f}%".format(mape(series_air_scaled, pred))) pred = model_air_milk.predict(n=36, series=train_milk) series_milk_scaled.plot(label="actual") pred.plot(label="forecast") plt.legend() print("MAPE = {:.2f}%".format(mape(series_milk_scaled, pred)))
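Note that forecasting must start from the training slice (train_air, train_milk): predict(n, series) in darts forecasts n steps past the end of the given series, so passing val_air would produce a forecast beyond the data and leave MAPE nothing to compare against. Judging the model from a single 36-step forecast origin is also fragile; darts offers rolling-origin evaluation via historical_forecasts, and the fitted Scaler can map results back to the original units. A sketch under those APIs, assuming model_air_milk, series_air_scaled and scaler_air from above; the start/horizon/stride values are illustrative choices:

# Sketch: rolling-origin backtest on the scaled air series, reusing the already
# fitted network (retrain=False), then undo the 0-1 scaling for readability.
backtest = model_air_milk.historical_forecasts(
    series_air_scaled,
    start=0.75,            # begin forecasting after the first 75% of the series
    forecast_horizon=12,
    stride=12,
    retrain=False,
)
print("Backtest MAPE = {:.2f}%".format(mape(series_air_scaled, backtest)))

backtest_passengers = scaler_air.inverse_transform(backtest)
backtest_passengers.plot(label="backtest, passenger counts")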
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/782/129782661.ipynb
null
null
[{"Id": 129782661, "ScriptId": 29846616, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9224136, "CreationDate": "05/16/2023 12:38:27", "VersionNumber": 1.0, "Title": "Time Series Made Easy in Python USing Darts Librar", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 74.0, "LinesInsertedFromPrevious": 74.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
793
0
793
793
129782855
<jupyter_start><jupyter_text>Data Science Job Salaries
### Content
| Column | Description |
|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| work_year | The year the salary was paid. |
| experience_level | The experience level in the job during the year with the following possible values: EN Entry-level / Junior MI Mid-level / Intermediate SE Senior-level / Expert EX Executive-level / Director |
| employment_type | The type of employment for the role: PT Part-time FT Full-time CT Contract FL Freelance |
| job_title | The role worked in during the year. |
| salary | The total gross salary amount paid. |
| salary_currency | The currency of the salary paid as an ISO 4217 currency code. |
| salary_in_usd | The salary in USD (FX rate divided by avg. USD rate for the respective year via fxdata.foorilla.com). |
| employee_residence | Employee's primary country of residence during the work year as an ISO 3166 country code. |
| remote_ratio | The overall amount of work done remotely, possible values are as follows: 0 No remote work (less than 20%) 50 Partially remote 100 Fully remote (more than 80%) |
| company_location | The country of the employer's main office or contracting branch as an ISO 3166 country code. |
| company_size | The average number of people that worked for the company during the year: S less than 50 employees (small) M 50 to 250 employees (medium) L more than 250 employees (large) |

Kaggle dataset identifier: data-science-job-salaries <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import scipy.stats as stats import statsmodels.api as sm from statsmodels.formula.api import ols df = pd.read_csv("../input/data-science-job-salaries/ds_salaries.csv", index_col=0) df.head() df.shape df["salary_in_usd"] = (df["salary_in_usd"] / 12).astype("int32") df.head() df.isna().sum() df.describe() df.head() df["remote_ratio"].value_counts() df["job_title"].unique() df["job_title"] = df["job_title"].replace( "Finance Data Analyst", "Financial Data Analyst" ) analyst = df[df["job_title"].str.contains("Data Analyst")] analyst["job_title"].value_counts() analyst.describe() analyst_remote = analyst["remote_ratio"].value_counts() labels = analyst_remote.index.map({100: "home-office", 50: "hybrid", 0: "on-site"}) pie_remote = analyst_remote.plot.pie( labels=labels, colors=sns.color_palette("muted"), autopct="%1.1f%%", figsize=(8, 8) ) plt.ylabel("") plt.title("Distribution of remote work ratio", fontsize=20) plt.show() sns.barplot(x="work_year", y="remote_ratio", data=analyst) plt.ylabel("Remote Ratio of Work") plt.xlabel("Work Year") plt.title("Distribution of remote work ratio by year", fontsize=15) df.employment_type.value_counts() my_data = df[df["job_title"].str.contains("Machine Learning")] my_data.job_title.value_counts() analyst_remote = my_data["remote_ratio"].value_counts() labels = analyst_remote.index.map({100: "home-office", 50: "hybrid", 0: "on-site"}) pie_remote = analyst_remote.plot.pie( labels=labels, colors=sns.color_palette("muted"), autopct="%1.1f%%", figsize=(8, 8) ) plt.ylabel("") plt.title("Distribution of remote work ratio", fontsize=20) plt.show() df.experience_level.unique() plt.figure(figsize=(12, 10)) sns.catplot(x="experience_level", data=df, kind="count", palette="magma") plt.show() levels = df.experience_level.value_counts() levels explode = [0, 0.1, 0.1, 0.3] plt.pie(x=levels.values, labels=levels.index, autopct="%1.2f%%", explode=explode) plt.title("Experience Level") plt.legend() plt.show() levels low, high = analyst.salary_in_usd.quantile([0.15, 0.985]) analyst_n_out = analyst[(analyst.salary_in_usd > low) & (analyst.salary_in_usd < high)] sns.barplot(x="work_year", y="salary_in_usd", data=analyst_n_out) plt.ylabel("Monthly Salary in USD") plt.xlabel("Work Year") plt.title("Mean of Monthly Salary by Year", fontsize=15) sns.histplot(analyst_n_out["salary_in_usd"]) plt.title("Distribution of Monthly Salary in USD", fontsize=15) plt.xlabel("Monthly Salary in USD") analyst_n_out ax = sns.barplot(x="job_title", y="salary_in_usd", data=analyst_n_out) ax.set_xticklabels(ax.get_xticklabels(), rotation=30, ha="right") plt.ylabel("Monthly Salary in USD") plt.xlabel("Job Title") plt.title("Mean Monthly Salary in USD by Job Title", fontsize=15) ax = sns.barplot(x="employment_type", y="remote_ratio", data=analyst_n_out) ax.set_xticklabels(ax.get_xticklabels(), rotation=30, ha="right") plt.ylabel("Remote Ratio of Work") plt.xlabel("Employment Type") plt.title("Mean Remote Ratio by Employment Type", fontsize=15)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/782/129782855.ipynb
data-science-job-salaries
ruchi798
[{"Id": 129782855, "ScriptId": 29246302, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9224136, "CreationDate": "05/16/2023 12:39:53", "VersionNumber": 1.0, "Title": "Data Science Job Salaries", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 139.0, "LinesInsertedFromPrevious": 139.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186147695, "KernelVersionId": 129782855, "SourceDatasetVersionId": 3806098}]
[{"Id": 3806098, "DatasetId": 2268489, "DatasourceVersionId": 3860816, "CreatorUserId": 3309826, "LicenseName": "CC0: Public Domain", "CreationDate": "06/15/2022 08:59:12", "VersionNumber": 1.0, "Title": "Data Science Job Salaries", "Slug": "data-science-job-salaries", "Subtitle": "Salaries of jobs in the Data Science domain", "Description": "### Content\n| Column | Description |\n|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| work_year | The year the salary was paid. |\n| experience_level | The experience level in the job during the year with the following possible values: EN Entry-level / Junior MI Mid-level / Intermediate SE Senior-level / Expert EX Executive-level / Director |\n| employment_type | The type of employement for the role: PT Part-time FT Full-time CT Contract FL Freelance |\n| job_title | The role worked in during the year. |\n| salary | The total gross salary amount paid. |\n| salary_currency | The currency of the salary paid as an ISO 4217 currency code. |\n| salary_in_usd | The salary in USD (FX rate divided by avg. USD rate for the respective year via fxdata.foorilla.com). |\n| employee_residence | Employee's primary country of residence in during the work year as an ISO 3166 country code. |\n| remote_ratio | The overall amount of work done remotely, possible values are as follows: 0 No remote work (less than 20%) 50 Partially remote 100 Fully remote (more than 80%) |\n| company_location | The country of the employer's main office or contracting branch as an ISO 3166 country code. |\n| company_size | The average number of people that worked for the company during the year: S less than 50 employees (small) M 50 to 250 employees (medium) L more than 250 employees (large) |\n\n### Acknowledgements\nI'd like to thank ai-jobs.net Salaries for aggregating this data!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2268489, "CreatorUserId": 3309826, "OwnerUserId": 3309826.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3806098.0, "CurrentDatasourceVersionId": 3860816.0, "ForumId": 2294990, "Type": 2, "CreationDate": "06/15/2022 08:59:12", "LastActivityDate": "06/15/2022", "TotalViews": 338940, "TotalDownloads": 59962, "TotalVotes": 1421, "TotalKernels": 360}]
[{"Id": 3309826, "UserName": "ruchi798", "DisplayName": "Ruchi Bhatia", "RegisterDate": "06/04/2019", "PerformanceTier": 4}]
false
1
1,287
0
1,716
1,287
129550650
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np from lightgbm import LGBMRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder url = "/kaggle/input/demand-forecasting-kernels-only/train.csv" df_train = pd.read_csv(url) df_train["date"] = pd.to_datetime(df_train["date"]) df_train.head() # load test set url2 = "/kaggle/input/demand-forecasting-kernels-only/test.csv" df_test = pd.read_csv(url2) df_test["date"] = pd.to_datetime(df_test["date"]) df_test.head() # Concatenate the training and testing dataframes df_combined = pd.concat([df_train, df_test]).reset_index(drop=True) # Display basic statistics df_train.describe() # Check for missing values df_train.isnull().sum() import plotly.express as px # # Downsample the data by month and calculate the mean sales for each month # df_downsampled = df_train.resample('M', on='date').mean() # # Create a line plot using Plotly Express # fig = px.line(df_downsampled, x=df_downsampled.index, y='sales', title='Sales Over Time') # # Display the plot # fig.show() # # Sales by store # fig = px.bar(df_train.groupby('store')['sales'].sum().reset_index(), x='store', y='sales', title='Sales by Store') # fig.show() # # Sales by item # fig = px.bar(df_train.groupby('item')['sales'].sum().reset_index(), x='item', y='sales', title='Sales by Item') # fig.show() # # Seasonality check - Average sales by month # df_train['month'] = df_train['date'].dt.month # df_train['year'] = df_train['date'].dt.year # fig = px.line(df_train.groupby(['year','month']).sales.mean().reset_index(), x='month', y='sales', color='year', title='Seasonality Check - Average Sales by Month') # fig.show() # # Seasonality check - Average sales by week # df_train['month'] = df_train['date'].dt.month # df_train['year'] = df_train['date'].dt.year # df_train['week_of_year'] = df_train['date'].dt.weekofyear # fig = px.line(df_train.groupby(['year','week_of_year']).sales.mean().reset_index(), x='week', y='sales', color='year', title='Seasonality Check - Average Sales by Week') # fig.show() # import plotly.graph_objs as go # from statsmodels.graphics.tsaplots import plot_pacf # from statsmodels.tsa.stattools import pacf # import matplotlib.pyplot as plt # # Calculate PACF # # Group the data by store # grouped_stores = df_train.groupby('store') # # Plot PACF for each store # for store, data in grouped_stores: # plt.figure() # # plt.title(f'Partial Autocorrelation for Store {store}') # plot_pacf(data['sales']) # plt.show() # Calculate SMAPE def smape(y_true, y_pred): return ( 100.0 / len(y_true) * np.sum(2 * np.abs(y_pred - y_true) / (np.abs(y_true) + np.abs(y_pred))) ) import pandas as pd import numpy as np from lightgbm import LGBMRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder # feature engineering # Create lagged features 
for each combination of store and item # for i in range(1, 31): # df_combined[f"lag_{i}"] = df_combined.groupby(["store", "item"])["sales"].shift(i) # Feature Engineering - Categorical df_combined["day_of_week"] = df_combined["date"].dt.dayofweek df_combined["month"] = df_combined["date"].dt.month df_combined["year"] = df_combined["date"].dt.year # df['week'] = df['date'].dt.week df_combined["day_of_year"] = df_combined["date"].dt.dayofyear df_combined["week_of_year"] = df_combined["date"].dt.isocalendar().week df_combined["sin_day_of_week"] = np.sin(2 * np.pi * df_combined["day_of_week"] / 7) df_combined["cos_day_of_week"] = np.cos(2 * np.pi * df_combined["day_of_week"] / 7) # Encode categorical features le_item = LabelEncoder() le_store = LabelEncoder() df_combined["item"] = le_item.fit_transform(df_combined["item"]) df_combined["store"] = le_store.fit_transform(df_combined["store"]) # item_dummies = pd.get_dummies(df_combined['item'], prefix='item') # df_combined = pd.concat([df_combined, item_dummies], axis=1) # store_dummies = pd.get_dummies(df_combined['store'], prefix='store') # df_combined = pd.concat([df_combined, store_dummies], axis=1) # Create dummy variables for day_of_week day_of_week_dummies = pd.get_dummies(df_combined["day_of_week"], prefix="day_of_week") df_combined = pd.concat([df_combined, day_of_week_dummies], axis=1) # create a new dataframe to hold the dummy variables # Create dummy variables for month month_dummies = pd.get_dummies(df_combined["month"], prefix="month") df_combined = pd.concat([df_combined, month_dummies], axis=1) # Create dummy variables for year year_dummies = pd.get_dummies(df_combined["year"], prefix="year") df_combined = pd.concat([df_combined, year_dummies], axis=1) # # Drop rows with NaN values # df = df.dropna() df_combined = df_combined.drop( ["month", "year", "day_of_year", "week_of_year", "day_of_week"], axis=1 ) # Separate your training and testing dataframes again df_train = df_combined[df_combined["sales"].notna()] df_test = df_combined[df_combined["sales"].isna()] # # print("SMAPE: ", smape(test["sales"].values, predictions)) column_list = df_combined.columns.tolist() print(column_list) df_train.dtypes df_train = df_train.drop("id", axis=1) df_train df_train = df_train.dropna() df_train df_test.columns # df_train = df_train.drop(['store','item'],axis = 1) # df_train from sklearn.model_selection import TimeSeriesSplit from lightgbm import LGBMRegressor # Number of splits n_splits = 5 # Initialize TimeSeriesSplit tscv = TimeSeriesSplit(n_splits=n_splits) model = LGBMRegressor() df_fc = df_train.copy() smape_values = [] # Perform cross-validation for train_index, test_index in tscv.split(df_train): CV_train, CV_test = df_train.iloc[train_index], df_train.iloc[test_index] # Fit the model on the training data model.fit(CV_train.drop(["sales", "date"], axis=1), CV_train["sales"]) # Predict on the test data predictions = model.predict(CV_test.drop(["sales", "date"], axis=1)) # Store the full vector of fold predictions (not just the first value) df_fc.loc[df_train.iloc[test_index].index, "predictions"] = predictions # Calculate SMAPE and add it to the list of SMAPE values smape_value = smape(CV_test["sales"].values, predictions) smape_values.append(smape_value) # Print the average SMAPE value across all folds print("Average SMAPE: ", np.mean(smape_values)) smape_values # df_train df1 = df_train.drop(["date", "sales"], axis=1) df1 # df.columns # Get feature importances feature_importances = pd.DataFrame( {"Feature": df1.columns, "Importance": model.feature_importances_} ) feature_importances = 
feature_importances.sort_values("Importance", ascending=False) print(feature_importances["Feature"][feature_importances["Importance"] > 0]) px.bar( data_frame=pd.Series(model.feature_importances_, index=df1.columns).sort_values(), orientation="h", ) df_train = df_train[ [ "sales", "store", "item", "sin_day_of_week", "cos_day_of_week", "month_7", "year_2013", "month_1", "month_6", "month_2", "month_12", "year_2014", "month_3", "month_10", "year_2017", "year_2016", "year_2015", "month_8", "month_5", "day_of_week_6", "month_4", "month_11", "month_9", "day_of_week_3", "day_of_week_4", "day_of_week_1", ] ] df_train from sklearn.model_selection import TimeSeriesSplit from lightgbm import LGBMRegressor # Number of splits n_splits = 5 # Initialize TimeSeriesSplit tscv = TimeSeriesSplit(n_splits=n_splits) model = LGBMRegressor() df_fc = df_train.copy() smape_values = [] # Perform cross-validation for train_index, test_index in tscv.split(df_train): CV_train, CV_test = df_train.iloc[train_index], df_train.iloc[test_index] # Fit the model on the training data model.fit(CV_train.drop(["sales"], axis=1), CV_train["sales"]) # Predict on the test data predictions = model.predict(CV_test.drop(["sales"], axis=1)) # Store the full vector of fold predictions (not just the first value) df_fc.loc[df_train.iloc[test_index].index, "predictions"] = predictions # Calculate SMAPE and add it to the list of SMAPE values smape_value = smape(CV_test["sales"].values, predictions) smape_values.append(smape_value) # Print the average SMAPE value across all folds print("Average SMAPE: ", np.mean(smape_values)) smape_values # # Final model df_test # df_test = df_test.drop(['store','item'],axis = 1) # df_test df_test = df_test[ [ "store", "item", "sin_day_of_week", "cos_day_of_week", "month_7", "year_2013", "month_1", "month_6", "month_2", "month_12", "year_2014", "month_3", "month_10", "year_2017", "year_2016", "year_2015", "month_8", "month_5", "day_of_week_6", "month_4", "month_11", "month_9", "day_of_week_3", "day_of_week_4", "day_of_week_1", ] ] df_test predictions = [] # Create a separate DataFrame to store the lagged predictions lagged_predictions = df_test.copy() # Iterate over the test set for i in range(len(df_test)): # Prepare the data for the current day, including lagged features data = lagged_predictions.iloc[i : i + 1].copy() # Make a prediction for the current day prediction = model.predict(data) # Store the prediction predictions.append(prediction[0]) # # If there are still more days to predict, update the necessary lagged features in the lagged_predictions DataFrame # if i < len(df_test) - 1: # for j in range(1, 31): # if i + j < len(df_test): # lagged_predictions.loc[i + j, f'lag_{j}'] = prediction[0] # Convert the list of predictions to a DataFrame or series, if necessary predictions = pd.Series(predictions) predictions # load test set url2 = "/kaggle/input/demand-forecasting-kernels-only/test.csv" df_test = pd.read_csv(url2) df_test["date"] = pd.to_datetime(df_test["date"]) df_test.head() # Add predictions to the test dataframe df_test["predictions"] = predictions.values df_test submission_df = df_test[["id", "predictions"]].copy() submission_df submission_df.rename(columns={"predictions": "sales"}, inplace=True) submission_df submission_df.to_csv("submission.csv", index=False) # submission = (pd.DataFrame(Y_test, index=X_test.index, columns=Y.columns) # .unstack() # .reset_index() # .sort_values(["item","store","date"]) # .drop(["item","store","date"], axis=1) # .reset_index() # .rename({0:"sales","index":"id"}, axis=1) # .set_index("id") # ) # 
submission.to_csv("submission.csv") # submission # # Prepare the submission data # submission = pd.DataFrame({'id': test_data.id, 'sales': predictions}) # submission.to_csv("submission.csv", index=False) # submission
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/550/129550650.ipynb
null
null
[{"Id": 129550650, "ScriptId": 38512674, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11964784, "CreationDate": "05/14/2023 18:44:07", "VersionNumber": 10.0, "Title": "notebook4d7d117c79", "EvaluationDate": "05/14/2023", "IsChange": false, "TotalLines": 347.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 347.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np from lightgbm import LGBMRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder url = "/kaggle/input/demand-forecasting-kernels-only/train.csv" df_train = pd.read_csv(url) df_train["date"] = pd.to_datetime(df_train["date"]) df_train.head() # load test set url2 = "/kaggle/input/demand-forecasting-kernels-only/test.csv" df_test = pd.read_csv(url2) df_test["date"] = pd.to_datetime(df_test["date"]) df_test.head() # Concatenate the training and testing dataframes df_combined = pd.concat([df_train, df_test]).reset_index(drop=True) # Display basic statistics df_train.describe() # Check for missing values df_train.isnull().sum() import plotly.express as px # # Downsample the data by month and calculate the mean sales for each month # df_downsampled = df_train.resample('M', on='date').mean() # # Create a line plot using Plotly Express # fig = px.line(df_downsampled, x=df_downsampled.index, y='sales', title='Sales Over Time') # # Display the plot # fig.show() # # Sales by store # fig = px.bar(df_train.groupby('store')['sales'].sum().reset_index(), x='store', y='sales', title='Sales by Store') # fig.show() # # Sales by item # fig = px.bar(df_train.groupby('item')['sales'].sum().reset_index(), x='item', y='sales', title='Sales by Item') # fig.show() # # Seasonality check - Average sales by month # df_train['month'] = df_train['date'].dt.month # df_train['year'] = df_train['date'].dt.year # fig = px.line(df_train.groupby(['year','month']).sales.mean().reset_index(), x='month', y='sales', color='year', title='Seasonality Check - Average Sales by Month') # fig.show() # # Seasonality check - Average sales by week # df_train['month'] = df_train['date'].dt.month # df_train['year'] = df_train['date'].dt.year # df_train['week_of_year'] = df_train['date'].dt.weekofyear # fig = px.line(df_train.groupby(['year','week_of_year']).sales.mean().reset_index(), x='week', y='sales', color='year', title='Seasonality Check - Average Sales by Week') # fig.show() # import plotly.graph_objs as go # from statsmodels.graphics.tsaplots import plot_pacf # from statsmodels.tsa.stattools import pacf # import matplotlib.pyplot as plt # # Calculate PACF # # Group the data by store # grouped_stores = df_train.groupby('store') # # Plot PACF for each store # for store, data in grouped_stores: # plt.figure() # # plt.title(f'Partial Autocorrelation for Store {store}') # plot_pacf(data['sales']) # plt.show() # Calculate SMAPE def smape(y_true, y_pred): return ( 100.0 / len(y_true) * np.sum(2 * np.abs(y_pred - y_true) / (np.abs(y_true) + np.abs(y_pred))) ) import pandas as pd import numpy as np from lightgbm import LGBMRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder # feature engineering # Create lagged features 
# for each combination of store and item
# for i in range(1, 31):
#     df_combined[f"lag_{i}"] = df_combined.groupby(["store", "item"])["sales"].shift(i)
# Feature Engineering - Categorical
df_combined["day_of_week"] = df_combined["date"].dt.dayofweek
df_combined["month"] = df_combined["date"].dt.month
df_combined["year"] = df_combined["date"].dt.year
# df['week'] = df['date'].dt.week
df_combined["day_of_year"] = df_combined["date"].dt.dayofyear
# Series.dt.weekofyear was removed in pandas 1.4+; isocalendar().week is the replacement
df_combined["week_of_year"] = df_combined["date"].dt.isocalendar().week.astype(int)
df_combined["sin_day_of_week"] = np.sin(2 * np.pi * df_combined["day_of_week"] / 7)
df_combined["cos_day_of_week"] = np.cos(2 * np.pi * df_combined["day_of_week"] / 7)
# Encode categorical features
le_item = LabelEncoder()
le_store = LabelEncoder()
df_combined["item"] = le_item.fit_transform(df_combined["item"])
df_combined["store"] = le_store.fit_transform(df_combined["store"])
# item_dummies = pd.get_dummies(df_combined['item'], prefix='item')
# df_combined = pd.concat([df_combined, item_dummies], axis=1)
# store_dummies = pd.get_dummies(df_combined['store'], prefix='store')
# df_combined = pd.concat([df_combined, store_dummies], axis=1)
# Create dummy variables for day_of_week
day_of_week_dummies = pd.get_dummies(df_combined["day_of_week"], prefix="day_of_week")
df_combined = pd.concat([df_combined, day_of_week_dummies], axis=1)
# Create dummy variables for month
month_dummies = pd.get_dummies(df_combined["month"], prefix="month")
df_combined = pd.concat([df_combined, month_dummies], axis=1)
# Create dummy variables for year
year_dummies = pd.get_dummies(df_combined["year"], prefix="year")
df_combined = pd.concat([df_combined, year_dummies], axis=1)
# # Drop rows with NaN values
# df = df.dropna()
df_combined = df_combined.drop(
    ["month", "year", "day_of_year", "week_of_year", "day_of_week"], axis=1
)
# Separate the training and testing dataframes again
df_train = df_combined[df_combined["sales"].notna()]
df_test = df_combined[df_combined["sales"].isna()]
# # print("SMAPE: ", smape(test["sales"].values, predictions))
column_list = df_combined.columns.tolist()
print(column_list)
df_train.dtypes
df_train = df_train.drop("id", axis=1)
df_train
df_train = df_train.dropna()
df_train
df_test.columns
# df_train = df_train.drop(['store','item'],axis = 1)
# df_train
from sklearn.model_selection import TimeSeriesSplit
from lightgbm import LGBMRegressor

# Number of splits
n_splits = 5
# Initialize TimeSeriesSplit
tscv = TimeSeriesSplit(n_splits=n_splits)
model = LGBMRegressor()
df_fc = df_train.copy()
smape_values = []
# Perform cross-validation
for train_index, test_index in tscv.split(df_train):
    CV_train, CV_test = df_train.iloc[train_index], df_train.iloc[test_index]
    # Fit the model on the training data
    model.fit(CV_train.drop(["sales", "date"], axis=1), CV_train["sales"])
    # Predict on the test data
    predictions = model.predict(CV_test.drop(["sales", "date"], axis=1))
    # Store the full vector of fold predictions (predictions[0] would broadcast
    # only the first value onto every row of the fold)
    df_fc.loc[df_train.iloc[test_index].index, "predictions"] = predictions
    # Calculate SMAPE and add it to the list of SMAPE values
    smape_value = smape(CV_test["sales"].values, predictions)
    smape_values.append(smape_value)
# Print the average SMAPE value across all folds
print("Average SMAPE: ", np.mean(smape_values)), smape_values
# df_train
df1 = df_train.drop(["date", "sales"], axis=1)
df1
# df.columns
# Get feature importances
feature_importances = pd.DataFrame(
    {"Feature": df1.columns, "Importance": model.feature_importances_}
)
feature_importances = feature_importances.sort_values("Importance", ascending=False)
print(feature_importances["Feature"][feature_importances["Importance"] > 0])
px.bar(
    data_frame=pd.Series(model.feature_importances_, index=df1.columns).sort_values(),
    orientation="h",
)
df_train = df_train[
    [
        "sales",
        "store",
        "item",
        "sin_day_of_week",
        "cos_day_of_week",
        "month_7",
        "year_2013",
        "month_1",
        "month_6",
        "month_2",
        "month_12",
        "year_2014",
        "month_3",
        "month_10",
        "year_2017",
        "year_2016",
        "year_2015",
        "month_8",
        "month_5",
        "day_of_week_6",
        "month_4",
        "month_11",
        "month_9",
        "day_of_week_3",
        "day_of_week_4",
        "day_of_week_1",
    ]
]
df_train
from sklearn.model_selection import TimeSeriesSplit
from lightgbm import LGBMRegressor

# Number of splits
n_splits = 5
# Initialize TimeSeriesSplit
tscv = TimeSeriesSplit(n_splits=n_splits)
model = LGBMRegressor()
df_fc = df_train.copy()
smape_values = []
# Perform cross-validation
for train_index, test_index in tscv.split(df_train):
    CV_train, CV_test = df_train.iloc[train_index], df_train.iloc[test_index]
    # Fit the model on the training data
    model.fit(CV_train.drop(["sales"], axis=1), CV_train["sales"])
    # Predict on the test data
    predictions = model.predict(CV_test.drop(["sales"], axis=1))
    # Store the full vector of fold predictions, as above
    df_fc.loc[df_train.iloc[test_index].index, "predictions"] = predictions
    # Calculate SMAPE and add it to the list of SMAPE values
    smape_value = smape(CV_test["sales"].values, predictions)
    smape_values.append(smape_value)
# Print the average SMAPE value across all folds
print("Average SMAPE: ", np.mean(smape_values)), smape_values
# # Final model
df_test
# df_test = df_test.drop(['store','item'],axis = 1)
# df_test
df_test = df_test[
    [
        "store",
        "item",
        "sin_day_of_week",
        "cos_day_of_week",
        "month_7",
        "year_2013",
        "month_1",
        "month_6",
        "month_2",
        "month_12",
        "year_2014",
        "month_3",
        "month_10",
        "year_2017",
        "year_2016",
        "year_2015",
        "month_8",
        "month_5",
        "day_of_week_6",
        "month_4",
        "month_11",
        "month_9",
        "day_of_week_3",
        "day_of_week_4",
        "day_of_week_1",
    ]
]
df_test
predictions = []
# Create a separate DataFrame to store the lagged predictions
lagged_predictions = df_test.copy()
# Iterate over the test set
for i in range(len(df_test)):
    # Prepare the data for the current day, including lagged features
    data = lagged_predictions.iloc[i : i + 1].copy()
    # Make a prediction for the current day
    prediction = model.predict(data)
    # Store the prediction
    predictions.append(prediction[0])
    # # If there are still more days to predict, update the necessary lagged features in the lagged_predictions DataFrame
    # if i < len(df_test) - 1:
    #     for j in range(1, 31):
    #         if i + j < len(df_test):
    #             lagged_predictions.loc[i + j, f'lag_{j}'] = prediction[0]
# Convert the list of predictions to a Series
predictions = pd.Series(predictions)
predictions
# load test set
url2 = "/kaggle/input/demand-forecasting-kernels-only/test.csv"
df_test = pd.read_csv(url2)
df_test["date"] = pd.to_datetime(df_test["date"])
df_test.head()
# Add predictions to the test dataframe
df_test["predictions"] = predictions.values
df_test
submission_df = df_test[["id", "predictions"]]
submission_df
submission_df.rename(columns={"predictions": "sales"}, inplace=True)
submission_df
submission_df.to_csv("submission.csv", index=False)
# submission = (pd.DataFrame(Y_test, index=X_test.index, columns=Y.columns)
#     .unstack()
#     .reset_index()
#     .sort_values(["item","store","date"])
#     .drop(["item","store","date"], axis=1)
#     .reset_index()
#     .rename({0:"sales","index":"id"}, axis=1)
#     .set_index("id")
# )
#
submission.to_csv("submission.csv") # submission # # Prepare the submission data # submission = pd.DataFrame({'id': test_data.id, 'sales': predictions}) # submission.to_csv("submission.csv", index=False) # submission
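# A minimal sketch of the recursive idea behind the commented-out lag features
# above: once lag columns exist, each day's forecast has to be fed back as the
# lag input for the following day, which is what the commented-out inner update
# loop in the prediction step would do. Everything below is a hypothetical
# stand-in: the toy linear rule replaces the fitted LGBMRegressor, and `history`
# replaces the real sales series.
import numpy as np

history = [10.0, 12.0, 11.0, 13.0]    # last known sales values
coef, intercept = 0.9, 1.5            # stand-in for a fitted model
recursive_preds = []
for _ in range(3):                    # forecast 3 days ahead
    lag_1 = history[-1]               # most recent (possibly predicted) value
    y_hat = coef * lag_1 + intercept  # stand-in for model.predict(...)
    recursive_preds.append(y_hat)
    history.append(y_hat)             # feed the forecast back as tomorrow's lag
print(recursive_preds)                # [13.2, 13.38, 13.542]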
false
0
3,761
0
3,761
3,761
129550705
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import zipfile

z = zipfile.ZipFile("/kaggle/input/sberbank-russian-housing-market/train.csv.zip")
z.extractall()
df_train = pd.read_csv("/kaggle/working/train.csv")
df_train.head()
num_linhas = len(df_train)
print("The training file contains", num_linhas, "rows.")
# Convert categorical columns to numeric values
from sklearn.preprocessing import LabelEncoder

for f in df_train.columns:
    if df_train[f].dtype == "object":
        lbl = LabelEncoder()
        lbl.fit(list(df_train[f].values))
        df_train[f] = lbl.transform(list(df_train[f].values))
df_train.head()
# Fill missing values with the column mean.
for col in df_train.columns:
    if df_train[col].isnull().sum() > 0:
        mean = df_train[col].mean()
        df_train[col] = df_train[col].fillna(mean)
df_train.head()
# Select the feature columns used for training and the column treated as the target.
X = df_train[
    [
        "full_sq",
        "life_sq",
        "floor",
        "school_km",
        "ecology",
        "max_floor",
        "material",
        "build_year",
        "num_room",
    ]
]
y = np.log(df_train.price_doc)
# Split the training data into train and test parts
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42
)
# Standardize the splits
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Fit the regression model
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso, Ridge, ElasticNet

modelo = ElasticNet()
modelo.fit(X_train, y_train)
# Model coefficients
modelo.coef_, modelo.intercept_
# Report the model's performance metrics
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_log_error
import numpy as np

y_pred = modelo.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
mape = mean_absolute_percentage_error(y_train, y_pred)
rmse = mean_squared_error(y_train, y_pred) ** 0.5
rmsle = np.sqrt(mean_squared_log_error(y_train, y_pred))
r2 = r2_score(y_train, y_pred)
print("MAE:", mae)
print("MAPE:", mape)
print("RMSE:", rmse)
print("RMSLE:", rmsle)
print("R2:", r2)
print("")
y_pred = modelo.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
mape = mean_absolute_percentage_error(y_test, y_pred)
rmse = mean_squared_error(y_test, y_pred) ** 0.5
rmsle = np.sqrt(mean_squared_log_error(y_test, y_pred))
r2 = r2_score(y_test, y_pred)
print("MAE:", mae)
print("MAPE:", mape)
print("RMSE:", rmse)
print("RMSLE:", rmsle)
print("R2:", r2)
# Load the test file
import zipfile

z = zipfile.ZipFile("/kaggle/input/sberbank-russian-housing-market/test.csv.zip")
z.extractall()
df_test = pd.read_csv("/kaggle/working/test.csv")
num_linhas = len(df_test)
print("The test file contains", num_linhas, "rows.")
# Convert categorical columns to numeric values
# Note: refitting LabelEncoder on the test set can map categories to different
# codes than in training; reusing the encoders fitted on the training data
# would be more consistent.
from sklearn.preprocessing import LabelEncoder

for f in df_test.columns:
    if df_test[f].dtype == "object":
        lbl = LabelEncoder()
        lbl.fit(list(df_test[f].values))
        df_test[f] = lbl.transform(list(df_test[f].values))
import pandas as pd

num_linhas = len(df_test)
print("The test file contains", num_linhas, "rows.")
# Fill missing values with the column mean.
for col in df_test.columns:
    if df_test[col].isnull().sum() > 0:
        mean = df_test[col].mean()
        df_test[col] = df_test[col].fillna(mean)
num_linhas = len(df_test)
print("The test file contains", num_linhas, "rows.")
# Select the columns used to make predictions with the model
X_test = df_test[
    [
        "full_sq",
        "life_sq",
        "floor",
        "school_km",
        "ecology",
        "max_floor",
        "material",
        "build_year",
        "num_room",
    ]
]
# The model was trained on standardized features, so the scaler fitted on the
# training data must be applied before predicting.
X_test = scaler.transform(X_test)
# Use the selected columns in X_test to generate predictions.
y_pred = modelo.predict(X_test)
# Apply the exponential to the predictions, since the model predicts log prices rather than actual values
y_pred = np.exp(y_pred)
# # Build the submission
# output = pd.DataFrame({'id': df_test.id, 'price_doc': y_pred})
# output.to_csv('submission.csv', index=False)
# print("Your submission was successfully saved!")
# output.head()
# Build a column with the predicted prices and save the submission
output = pd.DataFrame({"id": df_test.id, "price_doc": y_pred})
output.to_csv("submission.csv", index=False)
print("Your submission was successfully saved!")
output.head()
num_linhas = len(output)
print("The submission file contains", num_linhas, "rows.")
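# Wrapping the scaler and the regressor in a sklearn Pipeline removes the risk
# of the train/test scaling mismatch fixed above, because the transform is
# applied automatically at predict time. A minimal sketch on synthetic data;
# X_toy stands in for the selected housing feature columns and the generating
# coefficients are arbitrary.
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(42)
X_toy = rng.normal(size=(200, 3))
y_toy = np.log(60000 + 10000 * X_toy[:, 0] + rng.normal(scale=500, size=200))
X_tr, X_te, y_tr, y_te = train_test_split(X_toy, y_toy, test_size=0.3, random_state=42)

pipe = make_pipeline(StandardScaler(), ElasticNet())
pipe.fit(X_tr, y_tr)                     # scaler statistics come from the training split only
price_pred = np.exp(pipe.predict(X_te))  # invert the log transform back to prices
print(price_pred[:3])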
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/550/129550705.ipynb
null
null
[{"Id": 129550705, "ScriptId": 38519986, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14697649, "CreationDate": "05/14/2023 18:44:46", "VersionNumber": 2.0, "Title": "163066_Regressao_AC2", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 187.0, "LinesInsertedFromPrevious": 28.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 159.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,840
0
1,840
1,840
129550253
# ## 2 Building, training, and optimizing the model
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from sklearn.model_selection import learning_curve

# Load the data
df = pd.read_csv("articles.csv")
df = df.iloc[:, 1:]
df.head()
# The machine does not have enough RAM, so only half of the data is used for training.
half = df.sample(frac=0.5)
half.reset_index(drop=True, inplace=True)
half.info()
# ### 2.1 Building the classification model
# helper that computes quality metrics for a classification model
def classification_metrics(y_test, y_pred):
    acc = accuracy_score(y_test, y_pred)
    prec = precision_score(y_test, y_pred, average="macro")
    rec = recall_score(y_test, y_pred, average="macro")
    f1 = f1_score(y_test, y_pred, average="macro")
    return {"Accuracy": acc, "Precision": prec, "Recall": rec, "F1-score": f1}

# Training uses the target variable and the lemmatized article text
X = half["lematize_text"]
# vectorize the text feature
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer()
X = vectorizer.fit_transform(X)
X = X.toarray()
y = half["nomination_encoded"]
# Split the sample into training and test sets
# test_size=0.33 means a third of the data goes to the test set
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=42, shuffle=True
)
# GaussianNB is a naive Bayes classifier built on a probabilistic model with a Gaussian likelihood.
nb = GaussianNB().fit(X_train, y_train)
model_preds = nb.predict(X_test)
accuracy = accuracy_score(y_test, model_preds)
print("Accuracy:", accuracy)
# DecisionTreeClassifier solves classification tasks by building a decision tree from the data and its parameters.
dtc = DecisionTreeClassifier().fit(X_train, y_train)
model_preds = dtc.predict(X_test)
for metric_name, metric_score in classification_metrics(y_test, model_preds).items():
    print(f"{metric_name}: {metric_score:.2f}")
# RandomForestClassifier is an ensemble of decision trees used for classification.
rfc = RandomForestClassifier().fit(X_train, y_train)
model_preds = rfc.predict(X_test)
for metric_name, metric_score in classification_metrics(y_test, model_preds).items():
    print(f"{metric_name}: {metric_score:.2f}")
# KNeighborsClassifier is based on the k-nearest-neighbors (k-NN) method.
knn = KNeighborsClassifier(n_neighbors=5)
model_fit = knn.fit(X_train, y_train)
model_preds = model_fit.predict(X_test)
for metric_name, metric_score in classification_metrics(y_test, model_preds).items():
    print(f"{metric_name}: {metric_score:.2f}")
# --------------------------------------------------------------------------
# GradientBoostingClassifier is an ensemble method that builds a sequence of decision trees, each correcting the errors of the previous ones by minimizing a loss function.
gbc = GradientBoostingClassifier(
    n_estimators=100, learning_rate=0.1, max_depth=1, random_state=42
)
gbc.fit(X_train, y_train)
y_pred_gbc = gbc.predict(X_test)
for metric_name, metric_score in classification_metrics(y_test, y_pred_gbc).items():
    print(f"{metric_name}: {metric_score:.2f}")
# GradientBoostingClassifier gave the best result, but its long training time makes it impractical for the demo exam.
# --------------------------------------------------------------------------
# SVC (Support Vector Classifier) searches for the optimal separating hyperplane with the maximum margin between classes.
svm = SVC(kernel="linear", C=1, random_state=42)
svm.fit(X_train, y_train)
y_pred_svm = svm.predict(X_test)
acc_svm = accuracy_score(y_test, y_pred_svm)
for metric_name, metric_score in classification_metrics(y_test, y_pred_svm).items():
    print(f"{metric_name}: {metric_score:.2f}")
# MLPClassifier is a feed-forward neural network for classification, consisting of one or more hidden layers trained by backpropagation.
nn = MLPClassifier(
    hidden_layer_sizes=(10,),
    activation="relu",
    solver="adam",
    alpha=0.0001,
    max_iter=500,
    random_state=42,
)
nn.fit(X_train, y_train)
y_pred_nn = nn.predict(X_test)
for metric_name, metric_score in classification_metrics(y_test, y_pred_nn).items():
    print(f"{metric_name}: {metric_score:.2f}")
# DecisionTreeClassifier is chosen for further optimization, since it showed the best quality among the practical options.
# *Note: GradientBoostingClassifier was not chosen because of its long training time,
# even though its quality is better than the decision tree's.
# ## 2.2 Model optimization
# Optimization is done by searching for the best hyperparameters
param_grid = {"max_depth": range(1, 6, 1), "min_samples_leaf": range(1, 6, 1)}
grid_search = GridSearchCV(dtc, param_grid, cv=5)
grid_search.fit(X_train, y_train)
print(grid_search.best_params_)
y_pred_gs = grid_search.predict(X_test)
for metric_name, metric_score in classification_metrics(y_test, y_pred_gs).items():
    print(f"{metric_name}: {metric_score:.2f}")
# Build the learning and validation curves
train_sizes, train_scores, test_scores = learning_curve(
    dtc, X_train, y_train, cv=5, scoring="accuracy"
)
# Compute the mean accuracy and standard deviation for each training-set size
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
# Plot the learning curve and the validation curve
plt.plot(train_sizes, train_mean, label="Training Accuracy")
plt.plot(train_sizes, test_mean, label="Validation Accuracy")
# Add shaded bands showing the standard deviation
plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, alpha=0.1)
plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, alpha=0.1)
# Label the plot and add a legend
plt.xlabel("Number of Training Samples")
plt.ylabel("Accuracy Score")
plt.title("Learning Curve for DecisionTreeClassifier")
plt.legend(loc="best")
plt.show()
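# The half-data subsampling above is forced by the X.toarray() call: GaussianNB
# needs a dense matrix, but the tree-based models accept scipy sparse input
# directly. A minimal sketch of a fully sparse pipeline that skips the dense
# conversion; the toy corpus and labels below are hypothetical stand-ins for
# the lemmatized articles and their nominations.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

texts = [
    "нейронная сеть обучение",
    "сервер база данных",
    "модель классификация текст",
    "облако хранение данных",
    "градиентный бустинг дерево",
    "кластер вычисления сеть",
] * 10
labels = [0, 1, 0, 1, 0, 1] * 10
X_tr, X_te, y_tr, y_te = train_test_split(texts, labels, test_size=0.33, random_state=42)

# CountVectorizer output stays sparse end-to-end; no .toarray() is needed,
# so a much larger corpus fits in memory without subsampling.
sparse_pipe = make_pipeline(CountVectorizer(), DecisionTreeClassifier(random_state=42))
sparse_pipe.fit(X_tr, y_tr)
print(accuracy_score(y_te, sparse_pipe.predict(X_te)))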
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/550/129550253.ipynb
null
null
[{"Id": 129550253, "ScriptId": 38521932, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13417746, "CreationDate": "05/14/2023 18:39:30", "VersionNumber": 1.0, "Title": "Report2-VD-djostit", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 214.0, "LinesInsertedFromPrevious": 214.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,622
0
2,622
2,622
129550291
<jupyter_start><jupyter_text>Handwritten Math Symbols # Context While working on [project](http://sagyamthapa.me/Handwritten-Optical-Character-Recognition/) to build a calculator that evaluates handwritten math symbols, I realized 3 main problems: 1. The resolution of images were too small usually 32*32 pixels. 2. The quality of data was not satisfactory. Either too many copies of same image or not enough varieties. 3. The models trained on such datasets performed poorly in real world scenarios. 4. Main issue with existing MNIST and CHROME dataset was that resizing images from 400x400 pixels(ideal for writing in canvas) to 32x32 pixels means only around 8% of original data will be given to model. So we cannot expect model to perform well in real world scenarios. So, I set out to create my own dataset from scratch. # Methodology If you follow the project link you will see a save button. When the model fails to recognize a symbol I saved the image and added it to the dataset. After multiple iteration of adding new images to dataset and training the model on new dataset, I have created created a model that was fairly good at real world testing. This dataset was used to train the model that you are testing right now. # Content This dataset contains over 9000 handwritten digits and arithmetic operators. Total no of classes: 16 Digits: 0 1 2 3 4 5 6 7 8 9 Operators: Plus Minus Multiplication Division Decimal Equals Most images are of resolution 400x400 pixels. Some may be 155x155. I have resized image to 100x100 for in the [started notebook](https://www.kaggle.com/sagyamthapa/starter-notebook). Each class contains about 500 examples. # Inspiration This dataset was created for training the model for my [project](http://sagyamthapa.me/Handwritten-Optical-Character-Recognition/) . # Contact Email me at: [email protected] My website: [sagyamthapa.me](https://sagyamthapa.me/#contact-form) Kaggle dataset identifier: handwritten-math-symbols <jupyter_script>import pandas as pd import numpy as np import os import keras import matplotlib.pyplot as plt from keras.layers import ( Dense, Dropout, Flatten, ZeroPadding2D, Conv2D, MaxPooling2D, Activation, GlobalAveragePooling2D, ) from keras.preprocessing import image from keras.applications.mobilenet import preprocess_input from keras.preprocessing.image import ImageDataGenerator from keras.models import Model, Sequential from keras.optimizers import Adam from sklearn.model_selection import train_test_split from keras.callbacks import EarlyStopping, ReduceLROnPlateau from sklearn.utils import class_weight # splits the dataset into the three sets based on the specified ratio, # which in this case is 60% training, 20% validation, and 20% test import splitfolders splitfolders.ratio( "../input/handwritten-math-symbols/dataset", output="./", seed=1337, ratio=(0.6, 0.2, 0.2), group_prefix=None, ) # default values # # Initialize ImageDataGenerator objects for training and validation sets, create generators with specified parameters.The images are resized to 224x224 pixels, converted to RGB color mode, and batched into groups of 24. 
import os

NUM_CLASSES = len(os.listdir(r"./test"))
# Note: preprocess_input imported above comes from keras.applications.mobilenet;
# EfficientNet models embed their own input scaling, so efficientnet's
# preprocess_input (a pass-through) would be the matching choice here.
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input
)  # included in our dependencies
train_generator = train_datagen.flow_from_directory(
    r"./train",  # this is where you specify the path to the main data folder
    target_size=(224, 224),
    color_mode="rgb",
    batch_size=24,
    class_mode="categorical",
    shuffle=True,
)
val_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input
)  # included in our dependencies
val_generator = val_datagen.flow_from_directory(
    r"./val",  # this is where you specify the path to the main data folder
    target_size=(224, 224),
    color_mode="rgb",
    batch_size=24,
    class_mode="categorical",
    shuffle=True,
)
# # Import pre-trained models, create sequential model with EfficientNetB6 and dense layer, print model summary.
#
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.densenet import DenseNet201
from tensorflow.keras.applications.efficientnet import EfficientNetB6

md = EfficientNetB6(
    weights="imagenet", include_top=False, input_shape=(224, 224, 3), pooling="avg"
)
from keras.utils import plot_model

model = keras.models.Sequential(
    [md, keras.layers.Dense(NUM_CLASSES, activation="softmax")]
)
# summarize layers
print(model.summary())
# # Define callbacks, compile model, and train on generator data with specified parameters and callbacks.
#
# Note: earlystop is defined here but not added to the callback list below.
earlystop = EarlyStopping(patience=3)
learning_rate_reduction = ReduceLROnPlateau(
    monitor="loss", patience=2, verbose=1, factor=0.1, min_lr=0.0000000001
)
callback = [learning_rate_reduction]
# The 'lr' argument is deprecated in recent Keras releases; use 'learning_rate'.
model.compile(
    optimizer=Adam(learning_rate=1e-5),
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)
step_size_train = train_generator.n // train_generator.batch_size
step_size_val = val_generator.n // val_generator.batch_size
# fit_generator is deprecated; model.fit accepts generators directly.
history = model.fit(
    train_generator,
    steps_per_epoch=step_size_train,
    validation_data=val_generator,
    validation_steps=step_size_val,
    epochs=25,
    callbacks=callback,
)
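# The model above fine-tunes all of EfficientNetB6 from the start at a very
# small learning rate. A common alternative is two-stage training: freeze the
# pretrained base, fit only the new head at a normal rate, then unfreeze
# everything and fine-tune at a much lower rate. A minimal sketch with the
# smaller EfficientNetB0; the fit calls are left commented because they assume
# the generators defined above.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications.efficientnet import EfficientNetB0

N_CLASSES = 16  # digits plus operators in this dataset

base = EfficientNetB0(
    weights="imagenet", include_top=False, input_shape=(224, 224, 3), pooling="avg"
)
base.trainable = False  # stage 1: train only the classification head
two_stage_model = keras.Sequential(
    [base, keras.layers.Dense(N_CLASSES, activation="softmax")]
)
two_stage_model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=1e-3),
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)
# two_stage_model.fit(train_generator, validation_data=val_generator, epochs=5)

base.trainable = True  # stage 2: unfreeze and fine-tune the whole network
two_stage_model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=1e-5),
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)
# two_stage_model.fit(train_generator, validation_data=val_generator, epochs=10)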
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/550/129550291.ipynb
handwritten-math-symbols
sagyamthapa
[{"Id": 129550291, "ScriptId": 38501609, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8929361, "CreationDate": "05/14/2023 18:39:54", "VersionNumber": 1.0, "Title": "Hand Written Math Symbol Recognition_Rahul", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 76.0, "LinesInsertedFromPrevious": 76.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185725216, "KernelVersionId": 129550291, "SourceDatasetVersionId": 2947278}]
[{"Id": 2947278, "DatasetId": 1237036, "DatasourceVersionId": 2994798, "CreatorUserId": 4095338, "LicenseName": "GPL 2", "CreationDate": "12/20/2021 01:27:25", "VersionNumber": 4.0, "Title": "Handwritten Math Symbols", "Slug": "handwritten-math-symbols", "Subtitle": "Over 10000 Handwritten Digits and Arithmetic Operators", "Description": "# Context\nWhile working on [project](http://sagyamthapa.me/Handwritten-Optical-Character-Recognition/) to build a calculator that evaluates handwritten math symbols, I realized 3 main problems:\n\n1. The resolution of images were too small usually 32*32 pixels.\n2. The quality of data was not satisfactory. Either too many copies of same image or not enough varieties.\n3. The models trained on such datasets performed poorly in real world scenarios.\n4. Main issue with existing MNIST and CHROME dataset was that resizing images from 400x400 pixels(ideal for writing in canvas) to 32x32 pixels means only around 8% of original data will be given to model. So we cannot expect model to perform well in real world scenarios.\n\nSo, I set out to create my own dataset from scratch. \n\n# Methodology\n\nIf you follow the project link you will see a save button. When the model fails to recognize a symbol I saved the image and added it to the dataset. After multiple iteration of adding new images to dataset and training the model on new dataset, I have created created a model that was fairly good at real world testing. This dataset was used to train the model that you are testing right now. \n\n# Content\n\nThis dataset contains over 9000 handwritten digits and arithmetic operators.\nTotal no of classes: 16\nDigits: 0 1 2 3 4 5 6 7 8 9\nOperators: Plus Minus Multiplication Division Decimal Equals\nMost images are of resolution 400x400 pixels. Some may be 155x155. I have resized image to 100x100 for in the [started notebook](https://www.kaggle.com/sagyamthapa/starter-notebook).\nEach class contains about 500 examples. \n\n# Inspiration\n\nThis dataset was created for training the model for my [project](http://sagyamthapa.me/Handwritten-Optical-Character-Recognition/) .\n\n# Contact\nEmail me at: [email protected]\nMy website: [sagyamthapa.me](https://sagyamthapa.me/#contact-form)", "VersionNotes": "Data Update 2021/12/20", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1237036, "CreatorUserId": 4095338, "OwnerUserId": 4095338.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2947278.0, "CurrentDatasourceVersionId": 2994798.0, "ForumId": 1255272, "Type": 2, "CreationDate": "03/28/2021 04:53:38", "LastActivityDate": "03/28/2021", "TotalViews": 20785, "TotalDownloads": 2231, "TotalVotes": 38, "TotalKernels": 14}]
[{"Id": 4095338, "UserName": "sagyamthapa", "DisplayName": "Sagyam Thapa", "RegisterDate": "11/24/2019", "PerformanceTier": 1}]
false
0
982
0
1,526
982
129550144
# ## 1 Data parsing and preprocessing
# Import libraries
import pandas as pd
import io
import os
import glob
import docx
import json
from bs4 import BeautifulSoup
import requests
from datetime import datetime
import string
import re
import nltk
import pymorphy2
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# "word_tokenize" is not a downloadable NLTK resource; the stopwords corpus and
# the punkt tokenizer are what this notebook actually relies on.
nltk.download("stopwords")
nltk.download("punkt")
from nltk.stem import SnowballStemmer
from nltk.stem import WordNetLemmatizer
import warnings

warnings.filterwarnings("ignore")
from tqdm import tqdm
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pylab
import scipy.stats as stats
import nltk
from nltk import ngrams
import json
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.models import Word2Vec
from scipy.stats import shapiro
from scipy.stats import normaltest
from scipy.stats import anderson
import warnings

warnings.filterwarnings("ignore")
from tqdm import tqdm

# pip install python-docx
# nltk.download('averaged_perceptron_tagger_ru')
# ## 1.1 Parsing the data
condidates_path = r"Condidates.docx"
doc = docx.Document(condidates_path)
all_paras = doc.paragraphs
len(all_paras)
# Extract the company names from the document
condidates = []
i = 1
for para in all_paras:
    print(i, para.text)
    i += 1
    if para.text != "":
        condidates.append(para.text.rstrip())
len(condidates)
print(condidates, len(condidates))
# Check which companies are missing from the Data folder
all_json = glob.glob(r"Data\*.json")
json_names = [os.path.basename(x.replace(".json", "")) for x in all_json]
print(json_names, len(json_names))
condidates_pass = list(set(condidates) - set(json_names))
condidates_pass
# Two companies ('Skillbox' and 'Проект по использованию технологий компьютерного зрения на базе искусственного интеллекта (ИИ) для анализа медицинских изображений') had names that did not match;
# the names were corrected in the Data folder.
# ### 1.1.1 Parsing the JSON files
articles = []
for json_path in all_json:
    with open(json_path, "r", encoding="utf-8") as f:
        data = json.load(f)
        print(json_path)
        i = 0
        # Company information
        if data["info"] is not None:
            company_rating = data["info"]["rate"]
            company_activity = data["info"]["industries"]
            company_description = data["info"]["about"]
        else:
            company_rating = None
            company_activity = None
            company_description = None
        # Information from the article
        for article in data["refs"]:
            if article is not None:
                article_text = article[0]
                article_date = article[1]["day"] + " " + article[1]["month"]
                articles.append(
                    {
                        "company_name": os.path.basename(
                            json_path.replace(".json", "")
                        ),
                        "company_description": company_description,
                        "company_activity": company_activity,
                        "company_rating": company_rating,
                        "article_text": article_text,
                        "date_publish": article_date,
                    }
                )
                i += 1
        print("Number of articles: ", i)
print("Total number of articles: ", len(articles))
df = pd.DataFrame(data=articles)
df
df.info()
df.to_csv("articles.csv")
companies_fill = list(df[df["company_rating"].notna()]["company_name"].unique())
companies_fill
# Companies with missing values
companies_pass = list(df[df["company_rating"].isna()]["company_name"].unique())
companies_pass
# 1112 articles were extracted from the JSON files; only two companies ('Skillbox', 'Иннотех') contained information about the company
# Look on Habr for the companies with missing data
for company in companies_pass:
    company = company.replace(" ", "%20")
    url = (
        "https://habr.com/ru/search/?q="
        + company
        + "&target_type=companies&order=relevance"
    )
    print(url)
    page = requests.get(url)
    print(page.status_code)
    soup = BeautifulSoup(page.text, "html.parser")
    company_div = soup.find_all(
        "div", class_="tm-search-companies__item tm-search-companies__item_inlined"
    )
    print(company_div)
# None of the companies with missing values from the Data folder were found on Habr
# ### 1.1.2 Scraping the website
# Scrape the articles of the missing companies from Habr
condidates_pass
for company in condidates_pass:
    company = company.replace(" ", "%20")
    url = (
        "https://habr.com/ru/search/?q="
        + company
        + "&target_type=companies&order=relevance"
    )
    print(url)
    page = requests.get(url)
    print(page.status_code)
    soup = BeautifulSoup(page.text, "html.parser")
    company_div = soup.find_all(
        "div", class_="tm-search-companies__item tm-search-companies__item_inlined"
    )
    if len(company_div) == 0:
        print(company_div)
    else:
        for c in company_div:
            print(c.find("a", class_="tm-company-snippet__title").text)
# Of the remaining companies, only 'СберМаркет' and 'Нетология' are present on Habr
# https://habr.com/ru/companies/netologyru/articles/page
# https://habr.com/ru/companies/sbermarket/articles/page
url_companies = [
    r"https://habr.com/ru/companies/netologyru/articles/page",
    r"https://habr.com/ru/companies/sbermarket/articles/page",
]
articles2 = {
    "company_name": [],
    "company_description": [],
    "company_activity": [],
    "company_rating": [],
    "article_text": [],
    "date_publish": [],
}
for url_company in url_companies:
    a = True
    pagenum = 1
    for i in range(11):
        url = url_company + str(pagenum) + "/"
        page = requests.get(url)
        print(url)
        if page.status_code == 200:
            soup = BeautifulSoup(page.text, "html.parser")
            pages = soup.find_all("h2", class_="tm-title tm-title_h2")
            for i in pages:
                url2 = "https://habr.com" + str(i.a.get("href"))
                page = requests.get(url2)
                article = BeautifulSoup(page.text, "html.parser")
                print(url2)
                if a:
                    company_name = soup.find("a", class_="tm-company-card__name")
                    articles2["company_name"].append(company_name.text)
                    url = "https://habr.com" + str(company_name.get("href"))
                    company_page = requests.get(url)
                    company = BeautifulSoup(company_page.text, "html.parser")
                if a:
                    company_activity = company.find(
                        "div", class_="tm-company-profile__categories"
                    )
                    activity_clear = " ".join(company_activity.text.split())
                    articles2["company_activity"].append(activity_clear)
                if a:
                    company_description = soup.find(
                        "div", class_="tm-company-card__description"
                    )
                    articles2["company_description"].append(company_description.text)
                if a:
                    company_rating = soup.find(
                        "span",
                        class_="tm-votes-lever__score-counter tm-votes-lever__score-counter tm-votes-lever__score-counter_rating",
                    )
                    a = False
                    articles2["company_rating"].append(company_rating.text)
                data_publish = article.find(
                    "span", class_="tm-article-datetime-published"
                )
                if ":" in data_publish.text:
                    articles2["date_publish"].append(data_publish.text)
                else:
                    articles2["date_publish"].append(
                        datetime.today().strftime("%Y-%m-%d")
                    )
                text_article = article.find(
                    "div",
                    class_="article-formatted-body article-formatted-body article-formatted-body_version-1",
                )
                if text_article == None:
                    text_article = article.find(
                        "div",
                        class_="article-formatted-body article-formatted-body article-formatted-body_version-2",
                    )
                text_article_clear = " ".join(text_article.text.split())
                articles2["article_text"].append(text_article_clear)
        pagenum = pagenum + 1
df2 = pd.DataFrame(articles2)
df2
df2.to_csv("articles2.csv")
# ------------------------------------------------- Scraping from the publications search -----------------------------------------------------
articles_list = []
for condidate in tqdm(condidates_pass):
    condidate = condidate.replace(" ", "%20")
    pagenum = 1
    for i in tqdm(range(50)):
        url = (
            "https://habr.com/ru/search/page"
            + str(pagenum)
            + "/?q="
            + condidate
            + "&target_type=posts&order=relevance"
        )
        page = requests.get(url)
        # print(url)
        # print(page.status_code)
        if page.status_code == 200:
            soup = BeautifulSoup(page.text, "html.parser")
            pages = soup.find_all("h2", class_="tm-title tm-title_h2")
            for i in pages:
                url2 = "https://habr.com" + str(i.a.get("href"))
                page2 = requests.get(url2)
                article = BeautifulSoup(page2.text, "html.parser")
                # print('url2', url2)
                # print(article)
                if url2 != "https://habr.com/ru/companies/2035_university/news/561404/":
                    name_company = article.find(
                        "div", class_="tm-company-snippet__title"
                    )
                    # print('name_company', name_company)
                    if name_company != None:
                        # print(url2)
                        name_company = name_company.text
                        # print(page.status_code)
                        if page.status_code != 404:
                            # Company name
                            # company_name = soup.find('a', class_='tm-company-card__name')
                            company_name = condidate.replace("%20", " ")
                            # Company's field of activity
                            if name_company == condidate:
                                # Note: company_name is a plain string at this point, so
                                # .get("href") would raise if this branch ever executed;
                                # the profile link would need to be re-fetched from the page.
                                url = "https://habr.com" + str(company_name.get("href"))
                                company_page = requests.get(url)
                                company = BeautifulSoup(company_page.text, "html.parser")
                                company_activity = company.find(
                                    "div", class_="tm-company-profile__categories"
                                )
                                activity_clear = " ".join(company_activity.text.split())
                            else:
                                activity_clear = None
                            # Company description
                            if name_company == condidate:
                                company_description = company.find(
                                    "div", class_="tm-company-card__description"
                                ).text
                            else:
                                company_description = None
                            # Company rating
                            if name_company == condidate:
                                company_rating = company.find(
                                    "span",
                                    class_="tm-votes-lever__score-counter tm-votes-lever__score-counter tm-votes-lever__score-counter_rating",
                                ).text
                            else:
                                company_rating = None
                            # Publication date
                            data_publish = article.find(
                                "span", class_="tm-article-datetime-published"
                            )
                            # print('data_publish', data_publish.text)
                            if data_publish != None:
                                if ":" in data_publish.text:
                                    date_publish = data_publish.text
                                else:
                                    date_publish = datetime.today().strftime("%Y-%m-%d")
                            else:
                                date_publish = None
                                print(url2)
                            # Article text
                            text_article = article.find(
                                "div",
                                class_="article-formatted-body article-formatted-body article-formatted-body_version-1",
                            )
                            if text_article == None:
                                text_article = article.find(
                                    "div",
                                    class_="article-formatted-body article-formatted-body article-formatted-body_version-2",
                                )
                            if text_article != None:
                                text_article_clear = " ".join(text_article.text.split())
                            else:
                                print(url2)
                                text_article_clear = None
                            article_dict = {
                                "company_name": company_name,
                                "company_description": company_description,
                                "company_activity": activity_clear,
                                "company_rating": company_rating,
                                "article_text": text_article_clear,
                                "date_publish": date_publish,
                            }
                            articles_list.append(article_dict)
        pagenum = pagenum + 1
df3 = pd.DataFrame.from_records(articles_list)
df3
df3.info()
url2 = r"https://habr.com/ru/companies/2035_university/news/561404/"
page = requests.get(url2)
article = BeautifulSoup(page.text, "html.parser")
print(url2)
df3["company_name"].unique()
df3.to_csv("articles3.csv")
# --------------------------------------------------------------------------------------------------------------------------
# Combine the resulting dataframes
articles_df = pd.concat([df, df2, df3], ignore_index=True)
articles_df
articles_df.info()
# ### 1.1.3 Handling missing values and duplicates
duplicates = articles_df.duplicated(subset=["company_name", "article_text"])
num_duplicates = duplicates.sum()
print(f"Number of duplicates on the company_name and article_text fields: {num_duplicates}")
duplicates = articles_df.duplicated(subset=["article_text"])
num_duplicates = duplicates.sum()
print(f"Number of duplicates on the article_text field: {num_duplicates}")
import numpy as np

db = np.where(duplicates == True)
articles_df.drop_duplicates(
    subset=["company_name", "article_text"], keep="first", inplace=True
)
list(articles_df["company_name"].unique())
described_companies = articles_df.loc[
    articles_df["company_description"].notnull(), "company_name"
].unique()
described_companies
activity_companies = articles_df.loc[
    articles_df["company_activity"].notnull(), "company_name"
].unique()
activity_companies
rating_companies = articles_df.loc[
    articles_df["company_rating"].notnull(), "company_name"
].unique()
rating_companies
for company in described_companies:
    print(company)
    description = (
        articles_df.loc[articles_df["company_name"] == company, "company_description"]
        .dropna()
        .iloc[0]
    )
    articles_df.loc[
        (articles_df["company_name"] == company)
        & (articles_df["company_description"].isnull()),
        "company_description",
    ] = description
for company in activity_companies:
    print(company)
    activity = (
        articles_df.loc[articles_df["company_name"] == company, "company_activity"]
        .dropna()
        .iloc[0]
    )
    articles_df.loc[
        (articles_df["company_name"] == company)
        & (articles_df["company_activity"].isnull()),
        "company_activity",
    ] = activity
for company in rating_companies:
    print(company)
    rating = (
        articles_df.loc[articles_df["company_name"] == company, "company_rating"]
        .dropna()
        .iloc[0]
    )
    articles_df.loc[
        (articles_df["company_name"] == company)
        & (articles_df["company_rating"].isnull()),
        "company_rating",
    ] = rating
articles_df = articles_df.dropna(subset=["article_text"])
articles_df.to_csv("articles_full.csv")
# After parsing we obtained a dataframe with 4277 articles and 6 features: company name, company description, field of activity, company rating, article text, and publication date. Most of the data taken from the JSON files contained no company information, and those companies were also absent from Habr. Records with missing values and duplicates were removed.
деятельности, рейтинг компании, текст статьи и дата публикации. Большинство данных взятых из Json файлов не содержало информации о компании, также этих компаний отсутствовали на Хабре. Записи с пустыми значениями и дубликаты были удалены. # ## 1.2 Формирование структуры набора данных articles_df = pd.read_csv("articles_full.csv") articles_df_clear = articles_df.drop( [ "Unnamed: 0", "Unnamed: 0.1", "company_description", "company_activity", "company_rating", "date_publish", ], axis=1, ) # Признаки company_description, company_activity и company_rating имеют большое количество пропущенных значений. date_publish не будет использоваться в обучении articles_df_clear articles_df_clear.info() # ## 1.3 Предварительная обработка текстовых данных morph = pymorphy2.MorphAnalyzer() stopword = nltk.corpus.stopwords.words("russian") # Дополним наши пустые слова stopword.extend( [ "либо", "это", "мб", "далее", "дв", "свой", "ваш", "всё", "очень", "её", "ещё", "вообще", "наш", "который", ] ) def preprocess_text(data, stopwords=stopword): text = re.sub("ё", "е", data.lower()) text = re.sub("й", "и", text) text = re.sub(r"([.,!?])", r" \1 ", text) text = re.sub(r"[^а-яА-Я\s]+", "", text) text = text.strip() text = [w for w in text.split() if w not in stopwords] text = [w for w in text if len(w) >= 3] return " ".join(text) def lemmatization_text(data, morph=morph): result = " ".join([morph.parse(x)[0].normal_form for x in data.split()]) return result def get_result(data, morph=morph, stopwords=stopword): result = preprocess_text(data=data) result = lemmatization_text(result) return result def transform_data(data: pd.Series) -> list: result = [get_result(data=i) for i in tqdm(data)] return result text = articles_df_clear["article_text"] result_df = tqdm(transform_data(text)) articles_df_clear["lematize_text"] = result_df articles_df_clear result_df2 = [word_tokenize(text) for text in result_df] articles_df_clear["tokenize_text"] = result_df2 articles_df_clear result_df3 = [nltk.pos_tag(text, lang="rus") for text in tqdm(result_df2)] articles_df_clear["pos_tag_text"] = result_df3 articles_df_clear.info() articles_df_clear.to_csv("articles_df.csv") # Была выполнена предобработка текста статьи. Предобработка включает в себя удаление всех символов, кроме букв русского алфавита, удаление стоп слов и лематизация текста. Проведена токенизация и маркировка частей речи текста каждой статьи # ### 1.4 Поиск n-грамм. Векторизация текстов df = pd.read_csv("articles_df.csv") df df.info() # После чтения файла пропалин значения лемматизированного текста у некоторых записей. 
df = df.dropna(subset=["lematize_text"]) df.drop(["Unnamed: 0"], axis=1, inplace=True) df.head(5) text = list(df["lematize_text"]) text[0] vocabVect = CountVectorizer() vocabVect.fit(text) corpusVocab = vocabVect.vocabulary_ print("Количество признаков - {}".format(len(corpusVocab))) for i in list(corpusVocab)[1:10]: print("{}={}".format(i, corpusVocab[i])) test_features = vocabVect.transform(text) vocabVect.get_feature_names_out()[100:120] def find_ngrams(text, n): n_grams = ngrams(text.split(), n) return [" ".join(grams) for grams in n_grams] df["bigrams"] = df["lematize_text"].apply(lambda x: find_ngrams(x, 2)) df["trigrams"] = df["tokenize_text"].apply(lambda x: find_ngrams(x, 3)) df.head() vectorizer = TfidfVectorizer() tfidf_matrix = vectorizer.fit_transform(df["lematize_text"]) keywords = [] for i in tqdm(range(len(df))): tfidf_scores = tfidf_matrix[i].todense() scores_list = tfidf_scores.tolist()[0] words = vectorizer.get_feature_names_out() key_words_df = pd.DataFrame({"word": words, "score": scores_list}) key_words_df = key_words_df.sort_values(by="score", ascending=False) keywords.append(list(key_words_df["word"][:5])) df["keywords"] = keywords # ## 1.5 Разведочный анализ result_dict = {} with open("Target.json", "r", encoding="utf-8") as f: data = json.load(f) for entry in data["text"]: company = entry["Company"] nomination = entry["Nominations"] result_dict[company] = nomination result_dict df["nomination"] = df["company_name"].map(result_dict) df.head(5) df["company_name"].unique() df["nomination"].unique() df.info() # проверка нормальности распределения целевой переменной from sklearn.preprocessing import LabelEncoder le = LabelEncoder() df["nomination_encoded"] = le.fit_transform(df["nomination"]) sns.distplot(df["nomination_encoded"]) plt.title("Распределения данных") plt.show() sns.boxplot(df["nomination_encoded"]) stats.probplot(df["nomination_encoded"], dist="norm", plot=pylab) pylab.show() stat, p = shapiro(df["nomination_encoded"]) print("stat=%.3f, p=%.3f\n" % (stat, p)) if p > 0.05: print("Probably Gaussian") else: print("Probably not Gaussian") stat, p = normaltest(df["nomination_encoded"]) print("stat=%.3f, p=%.3f\n" % (stat, p)) if p > 0.05: print("Probably Gaussian") else: print("Probably not Gaussian") result = anderson(df["nomination_encoded"]) print("stat=%.3f" % (result.statistic)) for i in range(len(result.critical_values)): sig_lev, crit_val = result.significance_level[i], result.critical_values[i] if result.statistic < crit_val: print( f"probably Gaussian : {crit_val} critical value at {sig_lev} level of significance" ) else: print( f"Probably not Gaussian : {crit_val} critical value at {sig_lev} level of significance" ) # Распределение целевой переменной не является нормальным. df.to_csv("articles.csv")
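# The keyword loop above densifies one TF-IDF row at a time and rebuilds a DataFrame per article.
# A minimal sketch of a faster variant, assuming the `vectorizer` and `tfidf_matrix` fitted earlier
# (`top_k` is an illustrative parameter, not part of the original notebook):
import numpy as np

def top_tfidf_keywords(tfidf_matrix, vectorizer, top_k=5):
    # feature names are shared by every row, so look them up once
    words = vectorizer.get_feature_names_out()
    keywords = []
    for row in tfidf_matrix:  # iterating a CSR matrix yields sparse row vectors
        scores = row.toarray().ravel()
        top_idx = np.argsort(scores)[::-1][:top_k]  # indices of the highest scores
        keywords.append([words[j] for j in top_idx if scores[j] > 0])
    return keywords

# df["keywords"] = top_tfidf_keywords(tfidf_matrix, vectorizer)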
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/550/129550144.ipynb
null
null
[{"Id": 129550144, "ScriptId": 38521899, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13417746, "CreationDate": "05/14/2023 18:37:58", "VersionNumber": 1.0, "Title": "Report1-VD-djostit", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 615.0, "LinesInsertedFromPrevious": 615.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# ## 1 Data parsing and preprocessing # Import libraries import pandas as pd import io import os import glob import docx import json from bs4 import BeautifulSoup import requests from datetime import datetime import string import re import nltk import pymorphy2 from nltk.corpus import stopwords from nltk.tokenize import word_tokenize # nltk.download("word_tokenize") # not a real NLTK package; punkt below is what word_tokenize needs nltk.download("punkt") from nltk.stem import SnowballStemmer from nltk.stem import WordNetLemmatizer import numpy as np import seaborn as sns import matplotlib.pyplot as plt import pylab import scipy.stats as stats from nltk import ngrams from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from gensim.models import Word2Vec from scipy.stats import shapiro from scipy.stats import normaltest from scipy.stats import anderson import warnings warnings.filterwarnings("ignore") from tqdm import tqdm # pip install python-docx # nltk.download('averaged_perceptron_tagger_ru') # ## 1.1 Data parsing condidates_path = r"Condidates.docx" doc = docx.Document(condidates_path) all_paras = doc.paragraphs len(all_paras) # Take the company names from the document condidates = [] i = 1 for para in all_paras: print(i, para.text) i += 1 if para.text != "": condidates.append(para.text.rstrip()) len(condidates) print(condidates, len(condidates)) # See which companies are missing from the Data folder all_json = glob.glob(r"Data\*.json") json_names = [os.path.basename(x.replace(".json", "")) for x in all_json] print(json_names, len(json_names)) condidates_pass = list(set(condidates) - set(json_names)) condidates_pass # For two companies ('Skillbox' and 'Проект по использованию технологий компьютерного зрения на базе искусственного интеллекта (ИИ) для анализа медицинских изображений') the names did not match, so the names were changed in the Data folder # ### 1.1.1 Parsing the JSON files articles = [] for json_path in all_json: with open(json_path, "r", encoding="utf-8") as f: data = json.load(f) print(json_path) i = 0 # Company info if data["info"] is not None: company_rating = data["info"]["rate"] company_activity = data["info"]["industries"] company_description = data["info"]["about"] else: company_rating = None company_activity = None company_description = None # Information from the article for article in data["refs"]: if article is not None: article_text = article[0] article_date = article[1]["day"] + " " + article[1]["month"] articles.append( { "company_name": os.path.basename( json_path.replace(".json", "") ), "company_description": company_description, "company_activity": company_activity, "company_rating": company_rating, "article_text": article_text, "date_publish": article_date, } ) i += 1 print("Number of articles: ", i) print("Total number of articles: ", len(articles)) df = pd.DataFrame(data=articles) df df.info() df.to_csv("articles.csv") companies_fill = list(df[df["company_rating"].notna()]["company_name"].unique()) companies_fill # Companies with empty values companies_pass = list(df[df["company_rating"].isna()]["company_name"].unique()) companies_pass # We managed to extract 1112 articles from the JSON files; only two companies ('Skillbox', 'Иннотех') contained information about the company # Look for the companies with missing data on the Habr website for company in companies_pass: company = company.replace(" ", "%20") url = ( "https://habr.com/ru/search/?q=" + company + "&target_type=companies&order=relevance" ) print(url) page = requests.get(url) print(page.status_code) soup = BeautifulSoup(page.text, "html.parser") company_div = soup.find_all( "div", class_="tm-search-companies__item tm-search-companies__item_inlined" ) print(company_div) # None of the companies with empty values from the Data folder were found on Habr # ### 1.1.2 Parsing the website # Parsing articles of the missing companies from Habr condidates_pass for company in condidates_pass: company = company.replace(" ", "%20") url = ( "https://habr.com/ru/search/?q=" + company + "&target_type=companies&order=relevance" ) print(url) page = requests.get(url) print(page.status_code) soup = BeautifulSoup(page.text, "html.parser") company_div = soup.find_all( "div", class_="tm-search-companies__item tm-search-companies__item_inlined" ) if len(company_div) == 0: print(company_div) else: for c in company_div: print(c.find("a", class_="tm-company-snippet__title").text) # Of the remaining companies, only 'СберМаркет' and 'Нетология' are on Habr # https://habr.com/ru/companies/netologyru/articles/page # https://habr.com/ru/companies/sbermarket/articles/page url_companies = [ r"https://habr.com/ru/companies/netologyru/articles/page", r"https://habr.com/ru/companies/sbermarket/articles/page", ] articles2 = { "company_name": [], "company_description": [], "company_activity": [], "company_rating": [], "article_text": [], "date_publish": [], } for url_company in url_companies: a = True pagenum = 1 for i in range(11): url = url_company + str(pagenum) + "/" page = requests.get(url) print(url) if page.status_code == 200: soup = BeautifulSoup(page.text, "html.parser") pages = soup.find_all("h2", class_="tm-title tm-title_h2") for i in pages: url2 = "https://habr.com" + str(i.a.get("href")) page = requests.get(url2) article = BeautifulSoup(page.text, "html.parser") print(url2) if a: company_name = soup.find("a", class_="tm-company-card__name") articles2["company_name"].append(company_name.text) url = "https://habr.com" + str(company_name.get("href")) company_page = requests.get(url) company = BeautifulSoup(company_page.text, "html.parser") if a: company_activity = company.find( "div", class_="tm-company-profile__categories" ) activity_clear = " ".join(company_activity.text.split()) articles2["company_activity"].append(activity_clear) if a: company_description = soup.find( "div", class_="tm-company-card__description" ) articles2["company_description"].append(company_description.text) if a: company_rating = soup.find( "span", class_="tm-votes-lever__score-counter tm-votes-lever__score-counter tm-votes-lever__score-counter_rating", ) a = False articles2["company_rating"].append(company_rating.text) data_publish = article.find( "span", class_="tm-article-datetime-published" ) if ":" in data_publish.text: articles2["date_publish"].append(data_publish.text) else: articles2["date_publish"].append( datetime.today().strftime("%Y-%m-%d") ) text_article = article.find( "div", class_="article-formatted-body article-formatted-body article-formatted-body_version-1", ) if text_article is None: text_article = article.find( "div", class_="article-formatted-body article-formatted-body article-formatted-body_version-2", ) text_article_clear = " ".join(text_article.text.split()) articles2["article_text"].append(text_article_clear) pagenum = pagenum + 1 df2 = pd.DataFrame(articles2) df2 df2.to_csv("articles2.csv") # -------------------------------------------------Parsing the publications----------------------------------------------------- articles_list = [] for condidate in tqdm(condidates_pass): condidate = condidate.replace(" ", "%20") pagenum = 1 for i in tqdm(range(50)): url = ( "https://habr.com/ru/search/page" + str(pagenum) + "/?q=" + condidate + "&target_type=posts&order=relevance" ) page = requests.get(url) # print(url) # print(page.status_code) if page.status_code == 200: soup = BeautifulSoup(page.text, "html.parser") pages = soup.find_all("h2", class_="tm-title tm-title_h2") for i in pages: url2 = "https://habr.com" + str(i.a.get("href")) page2 = requests.get(url2) article = BeautifulSoup(page2.text, "html.parser") # print('url2', url2) # print(article) if url2 != "https://habr.com/ru/companies/2035_university/news/561404/": name_company = article.find( "div", class_="tm-company-snippet__title" ) # print('name_company', name_company) if name_company is not None: # print(url2) name_company = name_company.text # print(page.status_code) if page.status_code != 404: # Company name # company_name = soup.find('a', class_='tm-company-card__name') company_name = condidate.replace("%20", " ") # Company field of activity; compare against the decoded name rather than the URL-encoded `condidate`, and, since company_name is a plain string with no .get("href"), take the company profile URL from the article URL itself if name_company == company_name: url = url2.split("/articles/")[0].split("/news/")[0] company_page = requests.get(url) company = BeautifulSoup(company_page.text, "html.parser") company_activity = company.find( "div", class_="tm-company-profile__categories" ) activity_clear = " ".join(company_activity.text.split()) else: activity_clear = None # Company description if name_company == company_name: company_description = company.find( "div", class_="tm-company-card__description" ).text else: company_description = None # Company rating if name_company == company_name: company_rating = company.find( "span", class_="tm-votes-lever__score-counter tm-votes-lever__score-counter tm-votes-lever__score-counter_rating", ).text else: company_rating = None # Publication date data_publish = article.find( "span", class_="tm-article-datetime-published" ) # print('data_publish', data_publish.text) if data_publish is not None: if ":" in data_publish.text: date_publish = data_publish.text else: date_publish = datetime.today().strftime("%Y-%m-%d") else: date_publish = None print(url2) # Article text text_article = article.find( "div", class_="article-formatted-body article-formatted-body article-formatted-body_version-1", ) if text_article is None: text_article = article.find( "div", class_="article-formatted-body article-formatted-body article-formatted-body_version-2", ) if text_article is not None: text_article_clear = " ".join(text_article.text.split()) else: print(url2) text_article_clear = None article_dict = { "company_name": company_name, "company_description": company_description, "company_activity": activity_clear, "company_rating": company_rating, "article_text": text_article_clear, "date_publish": date_publish, } articles_list.append(article_dict) pagenum = pagenum + 1 df3 = pd.DataFrame.from_records(articles_list) df3 df3.info() url2 = r"https://habr.com/ru/companies/2035_university/news/561404/" page = requests.get(url2) article = BeautifulSoup(page.text, "html.parser") print(url2) df3["company_name"].unique() df3.to_csv("articles3.csv") # -------------------------------------------------------------------------------------------------------------------------- # Combine the resulting dataframes articles_df = pd.concat([df, df2, df3], ignore_index=True) articles_df articles_df.info() # ### 1.1.3 Handling missing values and duplicates duplicates = articles_df.duplicated(subset=["company_name", "article_text"]) num_duplicates = duplicates.sum() print(f"Number of duplicates on the company_name and article_text fields: {num_duplicates}") duplicates = articles_df.duplicated(subset=["article_text"]) num_duplicates = duplicates.sum() print(f"Number of duplicates on the article_text field: {num_duplicates}") import numpy as np db = np.where(duplicates == True) articles_df.drop_duplicates( subset=["company_name", "article_text"], keep="first", inplace=True ) list(articles_df["company_name"].unique()) described_companies = articles_df.loc[ articles_df["company_description"].notnull(), "company_name" ].unique() described_companies activity_companies = articles_df.loc[ articles_df["company_activity"].notnull(), "company_name" ].unique() activity_companies rating_companies = articles_df.loc[ articles_df["company_rating"].notnull(), "company_name" ].unique() rating_companies for company in described_companies: print(company) description = ( articles_df.loc[articles_df["company_name"] == company, "company_description"] .dropna() .iloc[0] ) articles_df.loc[ (articles_df["company_name"] == company) & (articles_df["company_description"].isnull()), "company_description", ] = description for company in activity_companies: print(company) activity = ( articles_df.loc[articles_df["company_name"] == company, "company_activity"] .dropna() .iloc[0] ) articles_df.loc[ (articles_df["company_name"] == company) & (articles_df["company_activity"].isnull()), "company_activity", ] = activity for company in rating_companies: print(company) rating = ( articles_df.loc[articles_df["company_name"] == company, "company_rating"] .dropna() .iloc[0] ) articles_df.loc[ (articles_df["company_name"] == company) & (articles_df["company_rating"].isnull()), "company_rating", ] = rating articles_df = articles_df.dropna(subset=["article_text"]) articles_df.to_csv("articles_full.csv") # After parsing we obtained a dataframe with 4277 articles and 6 features: company name, company description, field of activity, company rating, article text and publication date. Most of the data taken from the JSON files contained no information about the company, and those companies were also absent from Habr. Records with empty values and duplicates were removed. # ## 1.2 Building the dataset structure articles_df = pd.read_csv("articles_full.csv") articles_df_clear = articles_df.drop( [ "Unnamed: 0", "Unnamed: 0.1", "company_description", "company_activity", "company_rating", "date_publish", ], axis=1, ) # The company_description, company_activity and company_rating features have a large number of missing values; date_publish will not be used for training articles_df_clear articles_df_clear.info() # ## 1.3 Text data preprocessing morph = pymorphy2.MorphAnalyzer() stopword = nltk.corpus.stopwords.words("russian") # Extend the stop-word list stopword.extend( [ "либо", "это", "мб", "далее", "дв", "свой", "ваш", "всё", "очень", "её", "ещё", "вообще", "наш", "который", ] ) def preprocess_text(data, stopwords=stopword): text = re.sub("ё", "е", data.lower()) text = re.sub("й", "и", text) text = re.sub(r"([.,!?])", r" \1 ", text) text = re.sub(r"[^а-яА-Я\s]+", "", text) text = text.strip() text = [w for w in text.split() if w not in stopwords] text = [w for w in text if len(w) >= 3] return " ".join(text) def lemmatization_text(data, morph=morph): result = " ".join([morph.parse(x)[0].normal_form for x in data.split()]) return result def get_result(data, morph=morph, stopwords=stopword): result = preprocess_text(data=data) result = lemmatization_text(result) return result def transform_data(data: pd.Series) -> list: result = [get_result(data=i) for i in tqdm(data)] return result text = articles_df_clear["article_text"] result_df = transform_data(text) # transform_data already reports progress via tqdm articles_df_clear["lematize_text"] = result_df articles_df_clear result_df2 = [word_tokenize(text) for text in result_df] articles_df_clear["tokenize_text"] = result_df2 articles_df_clear result_df3 = [nltk.pos_tag(text, lang="rus") for text in tqdm(result_df2)] articles_df_clear["pos_tag_text"] = result_df3 articles_df_clear.info() articles_df_clear.to_csv("articles_df.csv") # The article text has been preprocessed. Preprocessing includes removing all characters except letters of the Russian alphabet, removing stop words and lemmatizing the text. Each article's text was also tokenized and part-of-speech tagged # ### 1.4 Finding n-grams. Text vectorization df = pd.read_csv("articles_df.csv") df df.info() # After reading the file, the lemmatized text values are missing for some records. df = df.dropna(subset=["lematize_text"]) df.drop(["Unnamed: 0"], axis=1, inplace=True) df.head(5) text = list(df["lematize_text"]) text[0] vocabVect = CountVectorizer() vocabVect.fit(text) corpusVocab = vocabVect.vocabulary_ print("Number of features - {}".format(len(corpusVocab))) for i in list(corpusVocab)[1:10]: print("{}={}".format(i, corpusVocab[i])) test_features = vocabVect.transform(text) vocabVect.get_feature_names_out()[100:120] def find_ngrams(text, n): n_grams = ngrams(text.split(), n) return [" ".join(grams) for grams in n_grams] df["bigrams"] = df["lematize_text"].apply(lambda x: find_ngrams(x, 2)) # build trigrams from the lemmatized text as well: after the round trip through CSV, tokenize_text is just the string form of a list, so splitting it would yield garbage n-grams df["trigrams"] = df["lematize_text"].apply(lambda x: find_ngrams(x, 3)) df.head() vectorizer = TfidfVectorizer() tfidf_matrix = vectorizer.fit_transform(df["lematize_text"]) keywords = [] for i in tqdm(range(len(df))): tfidf_scores = tfidf_matrix[i].todense() scores_list = tfidf_scores.tolist()[0] words = vectorizer.get_feature_names_out() key_words_df = pd.DataFrame({"word": words, "score": scores_list}) key_words_df = key_words_df.sort_values(by="score", ascending=False) keywords.append(list(key_words_df["word"][:5])) df["keywords"] = keywords # ## 1.5 Exploratory analysis result_dict = {} with open("Target.json", "r", encoding="utf-8") as f: data = json.load(f) for entry in data["text"]: company = entry["Company"] nomination = entry["Nominations"] result_dict[company] = nomination result_dict df["nomination"] = df["company_name"].map(result_dict) df.head(5) df["company_name"].unique() df["nomination"].unique() df.info() # check whether the target variable is normally distributed from sklearn.preprocessing import LabelEncoder le = LabelEncoder() df["nomination_encoded"] = le.fit_transform(df["nomination"]) sns.histplot(df["nomination_encoded"], kde=True) plt.title("Data distribution") plt.show() sns.boxplot(df["nomination_encoded"]) stats.probplot(df["nomination_encoded"], dist="norm", plot=pylab) pylab.show() stat, p = shapiro(df["nomination_encoded"]) print("stat=%.3f, p=%.3f\n" % (stat, p)) if p > 0.05: print("Probably Gaussian") else: print("Probably not Gaussian") stat, p = normaltest(df["nomination_encoded"]) print("stat=%.3f, p=%.3f\n" % (stat, p)) if p > 0.05: print("Probably Gaussian") else: print("Probably not Gaussian") result = anderson(df["nomination_encoded"]) print("stat=%.3f" % (result.statistic)) for i in range(len(result.critical_values)): sig_lev, crit_val = result.significance_level[i], result.critical_values[i] if result.statistic < crit_val: print( f"Probably Gaussian : {crit_val} critical value at {sig_lev} level of significance" ) else: print( f"Probably not Gaussian : {crit_val} critical value at {sig_lev} level of significance" ) # The distribution of the target variable is not normal. df.to_csv("articles.csv")
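# For a categorical target like `nomination`, class balance is usually more informative than
# normality tests on an arbitrary label encoding. A minimal sketch, assuming the `df` built above
# with its `nomination` column:
class_counts = df["nomination"].value_counts()
class_shares = df["nomination"].value_counts(normalize=True).round(3)
print(pd.concat([class_counts, class_shares], axis=1, keys=["count", "share"]))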
false
0
6,422
0
6,422
6,422
129527065
<jupyter_start><jupyter_text>Solar Energy Production The hourly output at each of the city of Calgary's solar photovoltaic projects and the locations of City of Calgary solar photovoltaic installations. Hourly energy production data from sites with a "public_url" can be found [here](https://data.calgary.ca/Environment/Solar-Photovoltaic-Sites/csgq-e555). Kaggle dataset identifier: solar-energy-production <jupyter_script># # 🌤 Forecasting Solar Power Production 🌤 # ## Machine Learning vs Statistical Models # In this analysis I want to compare several approaches of forecasting time series data. The solar power production data seems very suitable, as it is available on a hourly basis and for a period of over 5 years. # Following this awesome tutorial, I evaluate 3 forcasting approaches: # 1. Direct Multi-step Forecast Strategy # 2. Recursive Multi-step Forecast # 3. Direct-Recursive Hybrid Strategies # As the underlying regression models I will use a Linear Regression and Light GBM. # To compare the performance of the ML models let's also compare them to a classical Auto ARIMA and Auto ETS model. # The `sktime` package comes handy for this analysis, as it offers a variety of predefined time series functions, which come in handy for this analysis. # ## Script Dependencies import pandas as pd from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error from lightgbm import LGBMRegressor import numpy as np from sktime.forecasting.compose import make_reduction from sktime.forecasting.model_selection import temporal_train_test_split from sktime.forecasting.arima import AutoARIMA from sktime.forecasting.ets import AutoETS import matplotlib.pyplot as plt import plotly.express as px # ## Data Import and Exploration energy_production = pd.read_csv( "/kaggle/input/solar-energy-production/Solar_Energy_Production.csv" ) energy_production["date"] = pd.to_datetime(energy_production["date"]) energy_production.head() # ## 11 Power Plants energy_production.groupby("name").agg({"date": [min, max]}) # all plants besides Telus Spark provide > 5 years data # # Modelling # ## Data Preparation ## Experiment Setup ### Data Parameters frequency = ["h", "d", "M"][2] plant_name = "Whitehorn Multi-Service Centre" ### Modelling Parameters window_size = 24 forecast_horizon = 12 year_start = 2017 model_log_transformed = False forecasting_approaches = ["direct", "recursive", "dirrec"] # , 'multioutput' regression_models = {"Lin_Reg": LinearRegression, "LGBM": LGBMRegressor} statistical_models = {"AutoARIMA": AutoARIMA, "AutoETS": AutoETS} ## Evaluation metrics = {"MAE": mean_absolute_error, "MSE": mean_squared_error} # ### Data Preprocessing plant_data = ( energy_production.loc[energy_production.name == plant_name, ["date", "kWh"]] .set_index("date") .resample(frequency) .sum() .resample(frequency) .asfreq() .fillna(0) ) plant_data.head() # ## Data Exploration # ### Seasonality from statsmodels.graphics.tsaplots import plot_acf plot_acf(plant_data.resample("M").sum()) # Show the AR as a plot plt.show() # ### Modelling Processing # + define window of past values used for prediction # + define how many points into the future should be forecasted series_data = plant_data.loc[plant_data.index.year >= year_start].kWh if model_log_transformed: series_data = series_data.apply(lambda x: np.log(x + 1)) train, test = temporal_train_test_split(series_data, test_size=forecast_horizon) fig, ax = plt.subplots(1, figsize=plt.figaspect(0.4)) train.plot(ax=ax, label="train") 
test.plot(ax=ax, label="test") plt.title("Solar Energy Production " + plant_name) ax.set(ylabel="kWh") ax.set_xlabel("Month") plt.legend() # ## Models Fitting and Prediction # ### 1. Machine Learning Models fh = list(range(1, forecast_horizon + 1)) evaluation_frame = test.to_frame().rename(columns={"kWh": "y_true"}).copy() for approach in forecasting_approaches: print(f"Fitting Models using {approach} method") for model_name in regression_models.keys(): print(f"# Fitting {model_name}") forecaster = make_reduction( regression_models[model_name](), window_length=window_size, strategy=approach, ) fit_kwargs = {} if approach == "recursive" else {"fh": fh} # Fit and predict forecaster.fit(train, **fit_kwargs) prediction = forecaster.predict(fh=fh) evaluation_frame[approach + "_" + model_name] = prediction print() if model_log_transformed: evaluation_frame = evaluation_frame.applymap(lambda x: np.exp(x) - 1) evaluation_frame.head() # # ### 2. Statistical Models for model_name in statistical_models.keys(): forecaster = statistical_models[model_name]() forecaster.fit(train) prediction = forecaster.predict(fh=fh) evaluation_frame[model_name] = prediction # # Models Evaluation plot_df = evaluation_frame.reset_index().melt(id_vars="date") px.line(plot_df, x="date", y="value", color="variable") models = [ model_name for model_name in evaluation_frame.columns if model_name != "y_true" ] metrics_frame = [] for metric_name in metrics.keys(): for model in models: metrics_frame.append( pd.DataFrame( { "Metric": [metric_name], "Model": [model], "Score": [ metrics[metric_name]( evaluation_frame["y_true"], evaluation_frame[model] ) ], } ) ) metrics_frame = pd.concat(metrics_frame) metrics_frame metric = "MAE" px.bar( metrics_frame.loc[metrics_frame.Metric == metric], x="Model", color="Model", y="Score", ) metric = "MSE" px.bar( metrics_frame.loc[metrics_frame.Metric == metric], x="Model", color="Model", y="Score", )
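# To make the "recursive" strategy above concrete: a self-contained sketch of recursive
# multi-step forecasting with a plain LinearRegression on a synthetic monthly series
# (the window and horizon values here are illustrative, not tied to the experiment above):
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
y = np.sin(np.arange(120) * 2 * np.pi / 12) + rng.normal(0, 0.1, 120)  # toy seasonal series

window, horizon = 12, 6
X_hist = np.array([y[i : i + window] for i in range(len(y) - window)])  # sliding windows
y_next = y[window:]                                                     # one-step-ahead targets
reg = LinearRegression().fit(X_hist, y_next)

history = list(y[-window:])
preds = []
for _ in range(horizon):
    yhat = reg.predict(np.array(history[-window:]).reshape(1, -1))[0]
    preds.append(yhat)    # each prediction is fed back as an input
    history.append(yhat)  # for the next step -- the recursive part
print(np.round(preds, 3))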
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/527/129527065.ipynb
solar-energy-production
ivnlee
[{"Id": 129527065, "ScriptId": 38464467, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2378690, "CreationDate": "05/14/2023 14:45:54", "VersionNumber": 1.0, "Title": "\ud83c\udf24 Forecasting Solar Power Production", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 193.0, "LinesInsertedFromPrevious": 193.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185675069, "KernelVersionId": 129527065, "SourceDatasetVersionId": 5265653}]
[{"Id": 5265653, "DatasetId": 3064815, "DatasourceVersionId": 5338594, "CreatorUserId": 9865529, "LicenseName": "CC0: Public Domain", "CreationDate": "03/30/2023 01:50:29", "VersionNumber": 1.0, "Title": "Solar Energy Production", "Slug": "solar-energy-production", "Subtitle": "Hourly output at each of The City of Calgary's solar photovoltaic projects", "Description": "The hourly output at each of the city of Calgary's solar photovoltaic projects and the locations of City of Calgary solar photovoltaic installations. \n\nHourly energy production data from sites with a \"public_url\" can be found [here](https://data.calgary.ca/Environment/Solar-Photovoltaic-Sites/csgq-e555).", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3064815, "CreatorUserId": 9865529, "OwnerUserId": 9865529.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5265653.0, "CurrentDatasourceVersionId": 5338594.0, "ForumId": 3127581, "Type": 2, "CreationDate": "03/30/2023 01:50:29", "LastActivityDate": "03/30/2023", "TotalViews": 8432, "TotalDownloads": 897, "TotalVotes": 35, "TotalKernels": 2}]
[{"Id": 9865529, "UserName": "ivnlee", "DisplayName": "Ivan Lee", "RegisterDate": "03/08/2022", "PerformanceTier": 1}]
# # 🌤 Forecasting Solar Power Production 🌤 # ## Machine Learning vs Statistical Models # In this analysis I want to compare several approaches to forecasting time series data. The solar power production data seems very suitable, as it is available on an hourly basis and for a period of over 5 years. # Following this awesome tutorial, I evaluate three forecasting approaches: # 1. Direct Multi-step Forecast Strategy # 2. Recursive Multi-step Forecast # 3. Direct-Recursive Hybrid Strategies # As the underlying regression models I will use Linear Regression and LightGBM. # To put the ML models' performance in context, let's also compare them to a classical Auto ARIMA and Auto ETS model. # The `sktime` package comes in handy for this analysis, as it offers a variety of predefined time series functions. # ## Script Dependencies import pandas as pd from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error from lightgbm import LGBMRegressor import numpy as np from sktime.forecasting.compose import make_reduction from sktime.forecasting.model_selection import temporal_train_test_split from sktime.forecasting.arima import AutoARIMA from sktime.forecasting.ets import AutoETS import matplotlib.pyplot as plt import plotly.express as px # ## Data Import and Exploration energy_production = pd.read_csv( "/kaggle/input/solar-energy-production/Solar_Energy_Production.csv" ) energy_production["date"] = pd.to_datetime(energy_production["date"]) energy_production.head() # ## 11 Power Plants energy_production.groupby("name").agg({"date": [min, max]}) # all plants besides Telus Spark provide > 5 years of data # # Modelling # ## Data Preparation ## Experiment Setup ### Data Parameters frequency = ["h", "d", "M"][2] plant_name = "Whitehorn Multi-Service Centre" ### Modelling Parameters window_size = 24 forecast_horizon = 12 year_start = 2017 model_log_transformed = False forecasting_approaches = ["direct", "recursive", "dirrec"] # , 'multioutput' regression_models = {"Lin_Reg": LinearRegression, "LGBM": LGBMRegressor} statistical_models = {"AutoARIMA": AutoARIMA, "AutoETS": AutoETS} ## Evaluation metrics = {"MAE": mean_absolute_error, "MSE": mean_squared_error} # ### Data Preprocessing plant_data = ( energy_production.loc[energy_production.name == plant_name, ["date", "kWh"]] .set_index("date") .resample(frequency) .sum() .resample(frequency) .asfreq() .fillna(0) ) plant_data.head() # ## Data Exploration # ### Seasonality from statsmodels.graphics.tsaplots import plot_acf plot_acf(plant_data.resample("M").sum()) # Show the autocorrelation plot plt.show() # ### Modelling Setup # + define window of past values used for prediction # + define how many points into the future should be forecasted series_data = plant_data.loc[plant_data.index.year >= year_start].kWh if model_log_transformed: series_data = series_data.apply(lambda x: np.log(x + 1)) train, test = temporal_train_test_split(series_data, test_size=forecast_horizon) fig, ax = plt.subplots(1, figsize=plt.figaspect(0.4)) train.plot(ax=ax, label="train") test.plot(ax=ax, label="test") plt.title("Solar Energy Production " + plant_name) ax.set(ylabel="kWh") ax.set_xlabel("Month") plt.legend() # ## Models Fitting and Prediction # ### 1. 
Machine Learning Models fh = list(range(1, forecast_horizon + 1)) evaluation_frame = test.to_frame().rename(columns={"kWh": "y_true"}).copy() for approach in forecasting_approaches: print(f"Fitting Models using {approach} method") for model_name in regression_models.keys(): print(f"# Fitting {model_name}") forecaster = make_reduction( regression_models[model_name](), window_length=window_size, strategy=approach, ) fit_kwargs = {} if approach == "recursive" else {"fh": fh} # Fit and predict forecaster.fit(train, **fit_kwargs) prediction = forecaster.predict(fh=fh) evaluation_frame[approach + "_" + model_name] = prediction print() if model_log_transformed: evaluation_frame = evaluation_frame.applymap(lambda x: np.exp(x) - 1) evaluation_frame.head() # # ### 2. Statistical Models for model_name in statistical_models.keys(): forecaster = statistical_models[model_name]() forecaster.fit(train) prediction = forecaster.predict(fh=fh) evaluation_frame[model_name] = prediction # # Models Evaluation plot_df = evaluation_frame.reset_index().melt(id_vars="date") px.line(plot_df, x="date", y="value", color="variable") models = [ model_name for model_name in evaluation_frame.columns if model_name != "y_true" ] metrics_frame = [] for metric_name in metrics.keys(): for model in models: metrics_frame.append( pd.DataFrame( { "Metric": [metric_name], "Model": [model], "Score": [ metrics[metric_name]( evaluation_frame["y_true"], evaluation_frame[model] ) ], } ) ) metrics_frame = pd.concat(metrics_frame) metrics_frame metric = "MAE" px.bar( metrics_frame.loc[metrics_frame.Metric == metric], x="Model", color="Model", y="Score", ) metric = "MSE" px.bar( metrics_frame.loc[metrics_frame.Metric == metric], x="Model", color="Model", y="Score", )
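# A useful sanity check for the metric comparison above: score a seasonal-naive baseline
# (repeat the value from 12 months earlier) with the same metrics. A minimal sketch,
# assuming `train`, `test`, and `forecast_horizon` as defined in this notebook:
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

seasonal_naive = train.iloc[-12:].to_numpy()[:forecast_horizon]  # last full season, in order
print("baseline MAE:", mean_absolute_error(test, seasonal_naive))
print("baseline MSE:", mean_squared_error(test, seasonal_naive))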
false
1
1,553
0
1,665
1,553
129807220
<jupyter_start><jupyter_text>Credit Card customers A manager at the bank is disturbed with more and more customers leaving their credit card services. They would really appreciate if one could predict for them who is gonna get churned so they can proactively go to the customer to provide them better services and turn customers' decisions in the opposite direction I got this dataset from a website with the URL as https://leaps.analyttica.com/home. I have been using this for a while to get datasets and accordingly work on them to produce fruitful results. The site explains how to solve a particular business problem. Now, this dataset consists of 10,000 customers mentioning their age, salary, marital_status, credit card limit, credit card category, etc. There are nearly 18 features. We have only 16.07% of customers who have churned. Thus, it's a bit difficult to train our model to predict churning customers. Kaggle dataset identifier: credit-card-customers <jupyter_script>import pandas as pd pd.plotting.register_matplotlib_converters() import matplotlib.pyplot as plt import seaborn as sns print("Setup Complete") my_filepath = "/kaggle/input/credit-card-customers/BankChurners.csv" # Membaca dataset df = pd.read_csv(my_filepath) df # Melihat informasi dataset df.info() # Melihat deskripsi dataset df.describe() # Melihat presentase churn rate nasabah kartu kredit plt.title("Churn Rate of Credit Card Service (Attrition_Flag)") plt.pie( df["Attrition_Flag"].value_counts(), labels=df["Attrition_Flag"].value_counts().index, autopct="%1.2f%%", ) plt.show() # Dari data yang diperoleh dari suatu bank karena banyaknya nasabah yang mengakhiri atau menghentikan layanan kartu kreditnya, diperoleh informasi bahwa ada sekitar 16.07% yakni 1627 nasabah (Attrited Customer) yang memilih mengakhiri atau menghentikan layanan kartu kreditnya. 
Hal ini dapat kita analisa lebih lanjut pada ***Credit Card Customers Dataset by Attrition Flag*** pada bagan dibawah ini: # # group by Attrition_Flag attrition_group = df.groupby("Attrition_Flag") # create subplots for each variable fig, axs = plt.subplots(nrows=3, ncols=3, figsize=(30, 15)) fig.suptitle("Credit Card Customers Dataset by Attrition Flag") # plot for customer age sns.histplot(data=df, x="Customer_Age", hue="Attrition_Flag", kde=True, ax=axs[0, 0]) axs[0, 0].set_title("Customer_Age") # plot for gender sns.countplot(data=df, x="Gender", hue="Attrition_Flag", ax=axs[0, 1]) axs[0, 1].set_title("Gender") for p in axs[0, 1].patches: axs[0, 1].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for education level sns.countplot(data=df, x="Education_Level", hue="Attrition_Flag", ax=axs[0, 2]) axs[0, 2].set_title("Education Level") for p in axs[0, 2].patches: axs[0, 2].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for income category sns.countplot(data=df, x="Income_Category", hue="Attrition_Flag", ax=axs[1, 0]) axs[1, 0].set_title("Income Category") for p in axs[1, 0].patches: axs[1, 0].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for card utilization rate sns.histplot( data=df, x="Avg_Utilization_Ratio", hue="Attrition_Flag", kde=True, ax=axs[1, 1] ) axs[1, 1].set_title("Card Utilization Rate") # plot for total transaction amount sns.histplot(data=df, x="Total_Trans_Amt", hue="Attrition_Flag", kde=True, ax=axs[1, 2]) axs[1, 2].set_title("Total Transaction Amount") for p in axs[1, 2].patches: axs[1, 2].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for credit card type sns.countplot(data=df, x="Card_Category", hue="Attrition_Flag", ax=axs[2, 0]) axs[2, 0].set_title("Card_Category") for p in axs[2, 0].patches: axs[2, 0].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for dependent count sns.countplot(data=df, x="Dependent_count", hue="Attrition_Flag", ax=axs[2, 1]) axs[2, 1].set_title("Dependent Count ") for p in axs[2, 1].patches: axs[2, 1].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for total transaction CT sns.histplot(data=df, x="Total_Trans_Ct", hue="Attrition_Flag", kde=True, ax=axs[2, 2]) axs[2, 2].set_title("Total Transaction CT") for p in axs[2, 2].patches: axs[2, 2].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # adjust layout plt.tight_layout()
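# The 16.07% figure quoted above can be reproduced directly from the data. A minimal sketch,
# assuming the `df` loaded earlier:
churn_counts = df["Attrition_Flag"].value_counts()
churn_share = df["Attrition_Flag"].value_counts(normalize=True) * 100
print(churn_counts)          # absolute counts per flag
print(churn_share.round(2))  # percentages; "Attrited Customer" should come out near 16.07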
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/807/129807220.ipynb
credit-card-customers
sakshigoyal7
[{"Id": 129807220, "ScriptId": 38605123, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14867967, "CreationDate": "05/16/2023 15:45:05", "VersionNumber": 1.0, "Title": "Churn Rate of Credit Card Service", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 111.0, "LinesInsertedFromPrevious": 111.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 6}]
[{"Id": 186180242, "KernelVersionId": 129807220, "SourceDatasetVersionId": 1660340}]
[{"Id": 1660340, "DatasetId": 982921, "DatasourceVersionId": 1696625, "CreatorUserId": 5618523, "LicenseName": "CC0: Public Domain", "CreationDate": "11/19/2020 07:38:44", "VersionNumber": 1.0, "Title": "Credit Card customers", "Slug": "credit-card-customers", "Subtitle": "Predict Churning customers", "Description": "A manager at the bank is disturbed with more and more customers leaving their credit card services. They would really appreciate if one could predict for them who is gonna get churned so they can proactively go to the customer to provide them better services and turn customers' decisions in the opposite direction\n\nI got this dataset from a website with the URL as https://leaps.analyttica.com/home. I have been using this for a while to get datasets and accordingly work on them to produce fruitful results. The site explains how to solve a particular business problem. \n\nNow, this dataset consists of 10,000 customers mentioning their age, salary, marital_status, credit card limit, credit card category, etc. There are nearly 18 features. \n\nWe have only 16.07% of customers who have churned. Thus, it's a bit difficult to train our model to predict churning customers.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 982921, "CreatorUserId": 5618523, "OwnerUserId": 5618523.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1660340.0, "CurrentDatasourceVersionId": 1696625.0, "ForumId": 999426, "Type": 2, "CreationDate": "11/19/2020 07:38:44", "LastActivityDate": "11/19/2020", "TotalViews": 749413, "TotalDownloads": 86875, "TotalVotes": 1983, "TotalKernels": 387}]
[{"Id": 5618523, "UserName": "sakshigoyal7", "DisplayName": "Sakshi Goyal", "RegisterDate": "08/13/2020", "PerformanceTier": 1}]
import pandas as pd pd.plotting.register_matplotlib_converters() import matplotlib.pyplot as plt import seaborn as sns print("Setup Complete") my_filepath = "/kaggle/input/credit-card-customers/BankChurners.csv" # Read the dataset df = pd.read_csv(my_filepath) df # View dataset info df.info() # View summary statistics df.describe() # Look at the churn-rate percentage of credit card customers plt.title("Churn Rate of Credit Card Service (Attrition_Flag)") plt.pie( df["Attrition_Flag"].value_counts(), labels=df["Attrition_Flag"].value_counts().index, autopct="%1.2f%%", ) plt.show() # From the data obtained from a bank concerned about the number of customers ending or cancelling their credit card service, we can see that about 16.07%, i.e. 1627 customers (Attrited Customer), chose to end or cancel their credit card service. We can analyse this further in the ***Credit Card Customers Dataset by Attrition Flag*** charts below: # # group by Attrition_Flag attrition_group = df.groupby("Attrition_Flag") # create subplots for each variable fig, axs = plt.subplots(nrows=3, ncols=3, figsize=(30, 15)) fig.suptitle("Credit Card Customers Dataset by Attrition Flag") # plot for customer age sns.histplot(data=df, x="Customer_Age", hue="Attrition_Flag", kde=True, ax=axs[0, 0]) axs[0, 0].set_title("Customer_Age") # plot for gender sns.countplot(data=df, x="Gender", hue="Attrition_Flag", ax=axs[0, 1]) axs[0, 1].set_title("Gender") for p in axs[0, 1].patches: axs[0, 1].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for education level sns.countplot(data=df, x="Education_Level", hue="Attrition_Flag", ax=axs[0, 2]) axs[0, 2].set_title("Education Level") for p in axs[0, 2].patches: axs[0, 2].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for income category sns.countplot(data=df, x="Income_Category", hue="Attrition_Flag", ax=axs[1, 0]) axs[1, 0].set_title("Income Category") for p in axs[1, 0].patches: axs[1, 0].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for card utilization rate sns.histplot( data=df, x="Avg_Utilization_Ratio", hue="Attrition_Flag", kde=True, ax=axs[1, 1] ) axs[1, 1].set_title("Card Utilization Rate") # plot for total transaction amount sns.histplot(data=df, x="Total_Trans_Amt", hue="Attrition_Flag", kde=True, ax=axs[1, 2]) axs[1, 2].set_title("Total Transaction Amount") for p in axs[1, 2].patches: axs[1, 2].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for credit card type sns.countplot(data=df, x="Card_Category", hue="Attrition_Flag", ax=axs[2, 0]) axs[2, 0].set_title("Card_Category") for p in axs[2, 0].patches: axs[2, 0].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for dependent count sns.countplot(data=df, x="Dependent_count", hue="Attrition_Flag", ax=axs[2, 1]) axs[2, 1].set_title("Dependent Count") for p 
in axs[2, 1].patches: axs[2, 1].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # plot for total transaction CT sns.histplot(data=df, x="Total_Trans_Ct", hue="Attrition_Flag", kde=True, ax=axs[2, 2]) axs[2, 2].set_title("Total Transaction CT") for p in axs[2, 2].patches: axs[2, 2].annotate( f"\n{p.get_height():.0f}", (p.get_x() + p.get_width() / 2.0, p.get_height()), ha="center", va="center", fontsize=10, color="black", xytext=(0, 13), textcoords="offset points", ) # adjust layout plt.tight_layout()
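# Beyond raw counts per category, the attrition *rate* within each category often tells more.
# A minimal sketch, assuming the same `df` and the column names used above:
churn_by_income = (
    df.assign(is_churned=df["Attrition_Flag"].eq("Attrited Customer"))
    .groupby("Income_Category")["is_churned"]
    .mean()
    .mul(100)
    .round(2)
    .sort_values(ascending=False)
)
print(churn_by_income)  # percent of churned customers within each income bracket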
false
0
1,697
6
1,924
1,697
129807717
<jupyter_start><jupyter_text>Mental Health and Suicide Rates ### Context Close to 800 000 people die due to suicide every year, which is one person every 40 seconds. Suicide is a global phenomenon and occurs throughout the lifespan. Effective and evidence-based interventions can be implemented at population, sub-population and individual levels to prevent suicide and suicide attempts. There are indications that for each adult who died by suicide there may have been more than 20 others attempting suicide. Suicide is a complex issue and therefore suicide prevention efforts require coordination and collaboration among multiple sectors of society, including the health sector and other sectors such as education, labour, agriculture, business, justice, law, defense, politics, and the media. These efforts must be comprehensive and integrated as no single approach alone can make an impact on an issue as complex as suicide. ### Do leave an upvote if you found this dataset useful! Kaggle dataset identifier: mental-health-and-suicide-rates <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv( "/kaggle/input/mental-health-and-suicide-rates/Age-standardized suicide rates.csv" ) # df2 = pd.read_csv("/kaggle/input/mental-health-and-suicide-rates/Crude suicide rates.csv") df # df2.describe() import matplotlib.pyplot as plt import seaborn as sns countplot = sns.countplot(data=df, x="2016") # ### in the Year 2016 whose have a higher suicide rate, Females or Males? year_16 = df[df["2016"] > 0] by_sex = year_16.groupby("Sex", as_index=False) by_sex_16 = by_sex["2016"].mean() barplot = sns.barplot(x="Sex", y="2016", data=by_sex_16) # ### as the figure shows, males have a higher suicide rate than females, because 2 reasons: # 1. Men have an easily accessed weapons than women: in the USA 6 men out of 10 have weapons # 2. When men decide to suicide they really meant it, and they do it # 3. Personal Opinion: Women talk, men don't talk # ### in 2016 which Country has the most suicide rate? avg = year_16.groupby("Country", as_index=False)["2016"].mean() # avg = avg.head(3) relplot = sns.relplot(x="Country", y="2016", data=avg)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/807/129807717.ipynb
mental-health-and-suicide-rates
twinkle0705
[{"Id": 129807717, "ScriptId": 38603079, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14721492, "CreationDate": "05/16/2023 15:48:59", "VersionNumber": 1.0, "Title": "Mental Health1", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 49.0, "LinesInsertedFromPrevious": 49.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186180872, "KernelVersionId": 129807717, "SourceDatasetVersionId": 1338480}]
[{"Id": 1338480, "DatasetId": 748724, "DatasourceVersionId": 1370824, "CreatorUserId": 3649586, "LicenseName": "Attribution-NonCommercial-ShareAlike 3.0 IGO (CC BY-NC-SA 3.0 IGO)", "CreationDate": "07/15/2020 12:33:00", "VersionNumber": 2.0, "Title": "Mental Health and Suicide Rates", "Slug": "mental-health-and-suicide-rates", "Subtitle": "Suicide Rates of age groups in different countries along with Health Facility", "Description": "### Context\n\nClose to 800 000 people die due to suicide every year, which is one person every 40 seconds. Suicide is a global phenomenon and occurs throughout the lifespan. Effective and evidence-based interventions can be implemented at population, sub-population and individual levels to prevent suicide and suicide attempts. There are indications that for each adult who died by suicide there may have been more than 20 others attempting suicide.\n\nSuicide is a complex issue and therefore suicide prevention efforts require coordination and collaboration among multiple sectors of society, including the health sector and other sectors such as education, labour, agriculture, business, justice, law, defense, politics, and the media. These efforts must be comprehensive and integrated as no single approach alone can make an impact on an issue as complex as suicide.\n\n### Do leave an upvote if you found this dataset useful!", "VersionNotes": "updated files", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 748724, "CreatorUserId": 3649586, "OwnerUserId": 3649586.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1338480.0, "CurrentDatasourceVersionId": 1370824.0, "ForumId": 763628, "Type": 2, "CreationDate": "06/30/2020 16:51:51", "LastActivityDate": "06/30/2020", "TotalViews": 106199, "TotalDownloads": 12135, "TotalVotes": 211, "TotalKernels": 10}]
[{"Id": 3649586, "UserName": "twinkle0705", "DisplayName": "Twinkle Khanna", "RegisterDate": "09/01/2019", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv( "/kaggle/input/mental-health-and-suicide-rates/Age-standardized suicide rates.csv" ) # df2 = pd.read_csv("/kaggle/input/mental-health-and-suicide-rates/Crude suicide rates.csv") df # df2.describe() import matplotlib.pyplot as plt import seaborn as sns countplot = sns.countplot(data=df, x="2016") # ### In 2016, who had a higher suicide rate, females or males? year_16 = df[df["2016"] > 0] by_sex = year_16.groupby("Sex", as_index=False) by_sex_16 = by_sex["2016"].mean() barplot = sns.barplot(x="Sex", y="2016", data=by_sex_16) # ### As the figure shows, males have a higher suicide rate than females, for a few commonly cited reasons: # 1. Men have easier access to weapons than women: in the USA, 6 out of 10 men have weapons # 2. When men decide to attempt suicide, they usually mean it and follow through # 3. Personal opinion: women talk, men don't # ### In 2016, which country had the highest suicide rate? avg = year_16.groupby("Country", as_index=False)["2016"].mean() # avg = avg.head(3) relplot = sns.relplot(x="Country", y="2016", data=avg)
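# Another way to look at the sex gap: the male-to-female rate ratio per country. A minimal
# sketch, assuming the same `year_16` frame; the "Male"/"Female" labels are an assumption
# about how the Sex column is encoded in this file, so adjust if the labels differ:
pivot = year_16.pivot_table(index="Country", columns="Sex", values="2016", aggfunc="mean")
pivot["male_female_ratio"] = pivot["Male"] / pivot["Female"]  # assumed label spellings
print(pivot.sort_values("male_female_ratio", ascending=False).head(10))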
false
1
594
0
852
594
129807753
# # Libraries and Data imports
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report

# from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler

# ## Models
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier

df_orig = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
df_orig.head()

# # EDA
sns.swarmplot(x=df_orig["RoomService"], y=df_orig["Transported"])

# # Data modification
df_orig.info()

# V1
# cols_to_use = ['HomePlanet','CryoSleep','Cabin','Age','VIP']
# V2 This does not change the result
# cols_to_use = ['HomePlanet','CryoSleep','Cabin','Age','VIP','Age']
# V3
# cols_to_use = ['HomePlanet','CryoSleep','Age','VIP']
# V4 ~50%
# cols_to_use = ['VIP']
# V5 ~70%
# cols_to_use = ['CryoSleep']
# V6 ~70%
# cols_to_use = ['CryoSleep','HomePlanet']
# V7
# cols_to_use = ['HomePlanet']
# V8
# cols_to_use = ['Age','RoomService']
# V10 ~70%
# cols_to_use = ['Age','RoomService','VRDeck']
# V11 73%
# cols_to_use = ['Age','RoomService','VRDeck','Spa']
# V12 73%
# cols_to_use = ['Age','RoomService','VRDeck','Spa','ShoppingMall']
# V13 73%
# cols_to_use = ['Age','RoomService','VRDeck','Spa','FoodCourt']
# V14 73%
cols_to_use = ["Age", "RoomService", "VRDeck", "Spa", "CryoSleep"]
cols_to_train = [*cols_to_use, "Transported"]
df = df_orig[cols_to_train]
df.head()


def CategoriseCabin(dataframe):
    # Cabin has the form deck/num/side, e.g. "B/0/P"
    df_temp = dataframe
    df_temp["Cabin_Deck"] = df_temp["Cabin"].apply(lambda x: str(x).split("/")[0])
    df_temp["Cabin_Num"] = df_temp["Cabin"].apply(
        lambda x: str(x).split("/")[1] if "/" in str(x) else "nan"
    )
    df_temp["Cabin_Side"] = df_temp["Cabin"].apply(lambda x: str(x).split("/")[-1])
    return df_temp.drop(["Cabin"], axis=1)


# Cabin is not in cols_to_use for V14, so only split it when it is present
if "Cabin" in df.columns:
    df = CategoriseCabin(df)
    df.head()
    print("deck unique", df["Cabin_Deck"].unique())
    print("num unique", df["Cabin_Num"].unique())
    print("side unique", df["Cabin_Side"].unique())


def convertToInt(dataframe):
    df_temp = dataframe
    df_temp["CryoSleep"] = df_temp["CryoSleep"].fillna(-1)
    df_temp["CryoSleep"] = df_temp["CryoSleep"].astype(int)
    df_temp["VIP"] = df_temp["VIP"].fillna(-1)
    df_temp["VIP"] = df_temp["VIP"].astype(int)
    df_temp.loc[df_temp["Cabin_Num"] == "nan", "Cabin_Num"] = -1
    # f_temp['Cabin_Num'] = df_temp['Cabin_Num'].fillna(-1)
    df_temp["Cabin_Num"] = df_temp["Cabin_Num"].astype(int)
    return df_temp


def convertToIntVip(dataframe):
    df_temp = dataframe
    df_temp["VIP"] = df_temp["VIP"].fillna(-1)
    df_temp["VIP"] = df_temp["VIP"].astype(int)
    return df_temp


def convertToIntCryo(dataframe):
    df_temp = dataframe
    df_temp["CryoSleep"] = df_temp["CryoSleep"].fillna(-1)
    df_temp["CryoSleep"] = df_temp["CryoSleep"].astype(int)
    return df_temp


def convertToIntWithoutCabin(dataframe):
    df_temp = dataframe
    df_temp["CryoSleep"] = df_temp["CryoSleep"].fillna(-1)
    df_temp["CryoSleep"] = df_temp["CryoSleep"].astype(int)
    df_temp["VIP"] = df_temp["VIP"].fillna(-1)
    df_temp["VIP"] = df_temp["VIP"].astype(int)
    return df_temp


# df = convertToInt(df)
# df = convertToIntWithoutCabin(df)
# df = convertToIntVip(df)
df = convertToIntCryo(df)
df.head()


def oneHotEnc(data):
    ohe = OneHotEncoder(handle_unknown="ignore", sparse=False)
    cols_to_transform = ["HomePlanet", "Cabin_Deck", "Cabin_Side"]
    df_temp = data.copy()
    transformed = pd.DataFrame(ohe.fit_transform(data[cols_to_transform]).astype(int))
    df_temp = pd.concat([df_temp, transformed], axis=1).drop(cols_to_transform, axis=1)
    return df_temp


def oneHotEncWithoutCabin(data):
    ohe = OneHotEncoder(handle_unknown="ignore", sparse=False)
    cols_to_transform = ["HomePlanet"]
    df_temp = data.copy()
    transformed = pd.DataFrame(ohe.fit_transform(data[cols_to_transform]).astype(int))
    df_temp = pd.concat([df_temp, transformed], axis=1).drop(cols_to_transform, axis=1)
    return df_temp


# HomePlanet is not in cols_to_use for V14, so only encode it when present
# df = oneHotEnc(df)
if "HomePlanet" in df.columns:
    df = oneHotEncWithoutCabin(df)
df.head()

df.columns = df.columns.astype(str)
# df = df.fillna(-1)
df = df.fillna(0)
df.head()


# ## Normalize
def normaAge(data):
    df_temp = data[["Age", "RoomService"]]
    # df_temp = data.drop(["Transported","Age"], axis=1)
    scaler = MinMaxScaler()
    # mm_scaler = preprocessing.MinMaxScaler()
    df_temp = pd.DataFrame(
        scaler.fit_transform(df_temp), columns=["Age", "RoomService"]
    )
    data[["Age", "RoomService"]] = df_temp
    return data


from sklearn.preprocessing import StandardScaler


def stdAge(data):
    cols = ["Age", "RoomService", "VRDeck", "Spa"]
    df_temp = data[cols]
    print(df_temp)
    # df_temp = data.drop(["Transported","Age"], axis=1)
    scaler = StandardScaler()
    # mm_scaler = preprocessing.MinMaxScaler()
    df_temp = pd.DataFrame(scaler.fit_transform(df_temp), columns=cols)
    data[cols] = df_temp
    return data


# Note: both scalers run in sequence here, so Age and RoomService end up
# min-max scaled first and then standardised on top of that.
df_te = normaAge(df)
df_te.head()
df_te = stdAge(df)
df_te.head()

# # Training
y = df["Transported"]
X = df.drop(["Transported"], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y)
X_train.info()

# ## Knn
knn_model = KNeighborsClassifier(n_neighbors=4)
knn_model.fit(X_train, y_train)
knn_prediction = knn_model.predict(X_test)
print(classification_report(y_test, knn_prediction))
# The V1 score is probably wrong
# V1 accuracy 0.81
# V2 accuracy 0.63
# V3 accuracy 0.70
# V4 accuracy 0.51
# V5 accuracy 0.70
# V6 accuracy 0.67
# V7 accuracy 0.60
# V8 accuracy 0.51
# V9(std) accuracy 0.60
# V9(nor) accuracy 0.59
# V10 accuracy 0.66
# V11 accuracy 0.71
# V12 accuracy 0.70
# V13 accuracy 0.72
# V14 accuracy 0.71

# ## Naive Bayes
nb_model = GaussianNB()
nb_model = nb_model.fit(X_train, y_train)
pred_nb = nb_model.predict(X_test)
print(classification_report(y_test, pred_nb))
# V1 accuracy 0.71
# V2 accuracy 0.72
# V3 accuracy 0.69
# V4 accuracy 0.51
# V5 accuracy 0.70
# V6 accuracy 0.70
# V7 accuracy 0.59
# V8 accuracy 0.53
# V9(std) accuracy 0.61
# V9(nor) accuracy 0.59
# V10 accuracy 0.64
# V11 accuracy 0.65
# V12 accuracy 0.65
# V13 accuracy 0.65
# V14 accuracy 0.66

# ## Log regression
lr_model = LogisticRegression()
lnr_model = lr_model.fit(X_train, y_train)
y_pred_lr = lr_model.predict(X_test)
print(classification_report(y_test, y_pred_lr))
# V1 accuracy 0.73
# V2 accuracy 0.72
# V3 accuracy 0.70
# V4 accuracy 0.51
# V5 accuracy 0.70
# V6 accuracy 0.70
# V7 accuracy 0.59
# V8 accuracy 0.52
# V9(std) accuracy 0.64
# V9(nor) accuracy 0.61
# V10 accuracy 0.71
# V11 accuracy 0.76
# V12 accuracy 0.75
# V13 accuracy 0.76
# V14 accuracy 0.75

# ## Random forest
forest = RandomForestClassifier(n_estimators=100, random_state=100)
forest.fit(X_train, y_train)
forest_predict = forest.predict(X_test)
print(classification_report(y_test, forest_predict))
# V1 accuracy 0.70
# V2 accuracy 0.70
# V3 accuracy 0.72
# V4 accuracy 0.51
# V5 accuracy 0.70
# V6 accuracy 0.73
# V7 accuracy 0.59
# V8 accuracy 0.54
# V9(std) accuracy 0.61
# V9(nor) accuracy 0.61
# V10 accuracy 0.71
# V11 accuracy 0.76
# V12 accuracy 0.74
# V13 accuracy 0.77
# V14 accuracy 0.74

# # Result
df_test_og = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
df_test = df_test_og.copy()
df_test.head()
df_test = df_test[cols_to_use]
# df_test = CategoriseCabin(df_test)
df_test = convertToIntCryo(df_test)
df_test = stdAge(df_test)
df_test = df_test.fillna(0)
# df_test = oneHotEnc(df_test)
# print('deck unique',df_test['Cabin_Deck'].unique())
# print('num unique',df_test['Cabin_Num'].unique())
# print('side unique',df_test['Cabin_Side'].unique())
df_test.head()
df_test.columns = df_test.columns.astype(str)
df_test = df_test.fillna(-1)
df_test.head()
pred_result = forest.predict(df_test)
print(pred_result)
submission = pd.DataFrame(
    pred_result, index=df_test_og["PassengerId"], columns=["Transported"]
)
print(submission.columns)
submission.head()
submission.to_csv("Second_model.csv", header=True)
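# A note on the scaling above: stdAge refits its StandardScaler on whatever frame
# it receives, so df_test ends up scaled with test-set statistics instead of the
# training statistics. A minimal leakage-free sketch, assuming the notebook's
# X_train/X_test split and its column names (illustrative, not the author's code):
from sklearn.preprocessing import StandardScaler

scale_cols = ["Age", "RoomService", "VRDeck", "Spa"]
scaler = StandardScaler()
X_train[scale_cols] = scaler.fit_transform(X_train[scale_cols])  # fit on train only
X_test[scale_cols] = scaler.transform(X_test[scale_cols])  # reuse train mean/std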
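# OneHotEncoder(handle_unknown="ignore") only pays off when the encoder fitted on
# the training data is reused at prediction time: a category unseen during fit is
# then encoded as an all-zero row instead of raising an error. A small
# self-contained sketch with toy data (not the competition columns):
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

ohe = OneHotEncoder(handle_unknown="ignore", sparse=False)
ohe.fit(pd.DataFrame({"HomePlanet": ["Earth", "Mars"]}))
print(ohe.transform(pd.DataFrame({"HomePlanet": ["Europa"]})))  # [[0. 0.]]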
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/807/129807753.ipynb
null
null
[{"Id": 129807753, "ScriptId": 38378007, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13768217, "CreationDate": "05/16/2023 15:49:19", "VersionNumber": 1.0, "Title": "Space-titanic", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 320.0, "LinesInsertedFromPrevious": 320.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
3,136
0
3,136
3,136
129807058
<jupyter_start><jupyter_text>AMEX_data_sampled
Kaggle dataset identifier: amex-data-sampled
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# ### 🔥 Building a credit scoring model with the OptBinning library
# > #### Loading and preprocessing the data works the same as before. Since the explanatory variables are sampled at random, the selected features may differ from run to run.
import warnings

warnings.filterwarnings("ignore", module="sklearn.metrics.cluster")
df = pd.read_pickle("/kaggle/input/amex-data-sampled/train_df_sample.pkl")
# > ##### 💡 Use the data path printed above


def drop_null_cols(df, threshold=0.8):
    """
    Drops columns whose missing-value ratio is at or above the threshold.
    """
    null_percent = df.isnull().mean()
    drop_cols = list(null_percent[null_percent >= threshold].index)
    df = df.drop(drop_cols, axis=1)
    print(f"Dropped {len(drop_cols)} columns: {', '.join(drop_cols)}")
    return df


df = df.reset_index()
import hashlib


def encode_customer_id(id_str):
    encoded_id = hashlib.sha256(id_str.encode("utf-8")).hexdigest()[:16]
    return encoded_id


df["customer_ID"] = df["customer_ID"].apply(encode_customer_id)
df = drop_null_cols(df)
cat_features = [
    "B_30",
    "B_38",
    "D_114",
    "D_116",
    "D_117",
    "D_120",
    "D_126",
    "D_63",
    "D_64",
    "D_68",
]
cat_features = [f"{cf}_last" for cf in cat_features]
import random

num_cols = df.select_dtypes(include=np.number).columns.tolist()
num_cols = [col for col in num_cols if "target" not in col and col not in cat_features]
num_cols_sample = random.sample([col for col in num_cols if "target" not in col], 100)
feature_list = num_cols_sample + cat_features
all_list = feature_list + ["target"]
df = df[all_list]
import gc

gc.collect()
for categorical_feature in cat_features:
    if df[categorical_feature].dtype == "float16":
        df[categorical_feature] = df[categorical_feature].astype(str)
    if df[categorical_feature].dtype == "category":
        df[categorical_feature] = df[categorical_feature].astype(str)
    elif df[categorical_feature].dtype == "object":
        df[categorical_feature] = df[categorical_feature].astype(str)

from sklearn.preprocessing import LabelEncoder

le_encoder = LabelEncoder()
for categorical_feature in cat_features:
    df[categorical_feature].fillna(value="NaN", inplace=True)
    df[categorical_feature] = le_encoder.fit_transform(df[categorical_feature])

from sklearn.impute import SimpleImputer


def impute_nan(df, num_cols, strategy="mean"):
    """
    Imputes NaN values in num_cols with the given strategy.
    :param df: DataFrame
    :param num_cols: list of numeric columns to impute
    :param strategy: str, imputation strategy (default: 'mean')
    :return: DataFrame, imputed DataFrame
    """
    imputer = SimpleImputer(strategy=strategy)
    df[num_cols] = imputer.fit_transform(df[num_cols])
    return df


df = impute_nan(df, num_cols_sample, strategy="mean")
df.head()

import plotly.express as px

fig2 = px.pie(
    df,
    names="target",
    height=400,
    width=600,
    hole=0.7,
    title="target class Overview",
    color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
    hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
    margin=dict(t=100, b=30, l=0, r=0),
    showlegend=False,
    plot_bgcolor="#fafafa",
    paper_bgcolor="#fafafa",
    title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
    font=dict(size=17, color="#8a8d93"),
    hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()

# # Installing the optbinning library
# > #### !pip install is the standard way to install packages in a Python environment, while %pip install is the package-management magic provided by IPython. !pip install is the more common form in Jupyter Notebook; %pip install is useful in specific cases.
from optbinning import BinningProcess

# # 💡 Notes:
# A BinningProcess computes the optimal binning of the variables in a dataset given a binary, continuous, or multiclass target data type.
# Parameters:
# * variable_names (array-like): List of variable names.
# * max_n_prebins (int, default=20): Maximum number of bins after pre-binning.
# * min_prebin_size (float, default=0.05): Minimum fraction of records for each pre-bin.
# * min_n_bins (int or None, optional, default=None): Minimum number of bins. If None, min_n_bins is a value in [0, max_n_prebins].
# * max_n_bins (int or None, optional, default=None): Maximum number of bins. If None, max_n_bins is a value in [0, max_n_prebins].
# * min_bin_size (float or None, optional, default=None): Minimum fraction of records for each bin. If None, min_bin_size = min_prebin_size.
# * max_bin_size (float or None, optional, default=None): Maximum fraction of records for each bin. If None, max_bin_size = 1.0.
# * max_pvalue (float or None, optional, default=None): Maximum p-value between bins.
# * max_pvalue_policy (str, optional, default="consecutive"): Method to determine the bins not satisfying the p-value constraint; "consecutive" compares consecutive bins, "all" compares all bins.
# * selection_criteria (dict or None, optional, default=None): Variable selection criteria. See the notes for details.
# * fixed_variables (array-like or None, optional, default=None): List of variables to fix; these are kept even when they do not satisfy the selection criteria.
# * special_codes (array-like or None, optional, default=None): List of special codes, used to flag data values that need to be treated separately.
# * split_digits (int or None, optional, default=None): Significant digits of the split points. If split_digits is 0, split points are treated as integers. If None, all significant digits of the split points are considered.
# * categorical_variables (array-like or None, optional, default=None): List of numerical variables to be treated as categorical (nominal variables). Not applicable when the target type is multiclass.
# * binning_fit_params (dict or None, optional, default=None): Optimal binning fit parameters for specific variables
selection_criteria = {
    "iv": {"min": 0.025, "max": 0.7, "strategy": "highest", "top": 20},
    "quality_score": {"min": 0.01},
}
# > #### This streamlines the feature-selection process: once the criteria are set, only the variables that satisfy them are extracted.
binning_process = BinningProcess(
    feature_list,
    categorical_variables=cat_features,
    selection_criteria=selection_criteria,
)
X = df[feature_list]
y = df["target"]
binning_process.fit(X, y)
binning_process.information(print_level=2)
binning_process.summary()

# > #### To see only the selected variables, refer to the code below.
summary = binning_process.summary()
selected_summary = summary[summary["selected"] == True]
selected_summary

optb = binning_process.get_binned_variable("D_42_mean")
optb.binning_table.build()
# > #### Because the variables are sampled at random, the variable above may be missing and raise an error. Check before running.
# > #### 📊 Let's look at the binning plot. binning_table.plot() makes this easy.
optb.binning_table.plot(metric="event_rate")
# > #### For this variable the bad rate is very high in bin 2, and the event rate increases monotonically. A binning is generally considered good when the event rate is monotonic.
# > ##### In optb.binning_table.plot(metric="event_rate"), metric specifies which metric to plot; the plot is drawn from each bin's value of that metric. For example, "event_rate" plots each bin's event rate.
# > ##### optb.binning_table.plot() is a method of the BinningTable object used in the optbinning package. A BinningTable stores each variable's bin information and provides visualisation of the analysis results, which lets you inspect each bin's characteristics and gain useful insight for model development. Use the plot() method to draw a per-bin plot of a metric, setting the metric parameter to the one you want.
optb = binning_process.get_binned_variable("D_68_last")
optb.binning_table.build()
optb.binning_table.plot(metric="event_rate")

# > #### Let's see which variables were selected.
binning_process.get_support(names=True)
# > #### Remember building IV and WoE functions by hand to transform the data in the previous notebook? With OptBinning this is a single line of code. Take a look below.
X_transform = binning_process.transform(X, metric="woe")
X_transform
# > #### The original variable values have been transformed into WoE values. Modelling can now proceed on the WoE values.
from sklearn.linear_model import LogisticRegression
from optbinning import Scorecard
from optbinning.scorecard import Counterfactual

binning_process = BinningProcess(
    feature_list,
    categorical_variables=cat_features,
    selection_criteria=selection_criteria,
)
estimator = LogisticRegression(solver="lbfgs")
scorecard = Scorecard(
    binning_process=binning_process,
    estimator=estimator,
    scaling_method="min_max",
    scaling_method_params={"min": 300, "max": 850},
)
scorecard.fit(X, y)
# > #### We use the simplest option, logistic regression; XGBoost or LightGBM would also work as the estimator.
# > #### Let's build the scorecard right away, with a minimum score of 300 and a maximum of 850.
scorecard.table(style="summary")
# > #### Let's inspect the scorecard table built above.
scorecard.table(style="detailed")
# > #### The detailed table looks like the above; it also shows the WoE, IV, and JS values.
# > ##### "JS Distance" measures the distance between the credit-score distributions of the individual bins.
# > ##### In a scorecard, "JS Distance" indicates how much the credit scores differ from bin to bin: the more the per-bin score distributions differ, the higher the JS distance and the better the separation between bins. This helps the scorecard adjust points appropriately as the independent variables change when producing a credit grade.
sc = scorecard.table(style="summary")
sc.groupby("Variable").agg({"Points": [np.min, np.max]}).sum()
# > #### The code above sanity-checks that the points add up to the intended score range.
# > #### 📊 Let's evaluate the score we built.
y_pred = scorecard.predict_proba(X)[:, 1]
from optbinning.scorecard import plot_auc_roc, plot_cap, plot_ks

plot_auc_roc(y, y_pred)
# > #### With such a simple model the AUC drops compared to before, but since the data is fairly well preprocessed the score is not too low.
plot_ks(y, y_pred)
# > #### The KS statistic can also be computed and visualised with a simple helper.
# > #### 📊 Let's look at the score distribution.
score = scorecard.score(X)
import matplotlib.pyplot as plt

mask = y == 0
plt.hist(score[mask], label="non-event", color="b", alpha=0.35)
plt.hist(score[~mask], label="event", color="r", alpha=0.35)
plt.xlabel("score")
plt.legend()
plt.show()

# ### Model monitoring with the OptBinning library
# > #### Checking model drift with the PSI metric
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, stratify=y, random_state=42
)
# > #### Split into training and test sets; here the test set stands in for data we have never seen.
scorecard.fit(X_train, y_train)
from optbinning.scorecard import ScorecardMonitoring

monitoring = ScorecardMonitoring(
    scorecard=scorecard, psi_method="cart", psi_n_bins=10, verbose=True
)
monitoring.fit(X_test, y_test, X_train, y_train)
monitoring.psi_table()
# > #### Very stable. The plot is just as easy to draw.
monitoring.psi_plot()
monitoring.tests_table()
# > ##### These statistical tests are performed to determine whether the event rate (chi-squared test - binary target) or the mean (Student's t-test - continuous target) differs significantly. The null hypothesis is that the actual values equal the expected values.
monitoring.system_stability_report()
# > #### The same can also be checked through the system stability report.
monitoring.psi_variable_table(style="summary")
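# For a single variable, the same machinery is available through OptimalBinning.
# A minimal sketch on synthetic data (the feature and target here are made up,
# not drawn from the AMEX dataset), assuming optbinning is installed:
import numpy as np
from optbinning import OptimalBinning

rng = np.random.default_rng(0)
x_demo = rng.normal(size=5000)
y_demo = (x_demo + rng.normal(scale=2.0, size=5000) > 1.0).astype(int)

optb_demo = OptimalBinning(name="x_demo", dtype="numerical", solver="cp")
optb_demo.fit(x_demo, y_demo)
print(optb_demo.status)  # "OPTIMAL" when a solution is found
print(optb_demo.binning_table.build())  # per-bin counts, event rate, WoE, IV
x_demo_woe = optb_demo.transform(x_demo, metric="woe")  # raw values -> bin WoE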
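# The PSI table produced by ScorecardMonitoring follows the usual population
# stability formula, PSI = sum((actual% - expected%) * ln(actual% / expected%))
# over the bins. A rough self-contained sketch (my own illustration, not code
# from optbinning):
import numpy as np


def psi(expected_counts, actual_counts, eps=1e-6):
    # Population Stability Index between two binned distributions
    e = np.asarray(expected_counts, dtype=float)
    a = np.asarray(actual_counts, dtype=float)
    e = e / e.sum() + eps
    a = a / a.sum() + eps
    return float(np.sum((a - e) * np.log(a / e)))


# hypothetical bin counts for a train vs. test score distribution
print(psi([120, 300, 400, 180], [110, 280, 430, 180]))  # small value -> stable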
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/807/129807058.ipynb
amex-data-sampled
kimtaehun
[{"Id": 129807058, "ScriptId": 38603686, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13683894, "CreationDate": "05/16/2023 15:43:39", "VersionNumber": 1.0, "Title": "4\uac15) OptBinning \ub77c\uc774\ube0c\ub7ec\ub9ac \uc0ac\uc6a9 \ubc29\ubc95", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 320.0, "LinesInsertedFromPrevious": 320.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
[{"Id": 186180026, "KernelVersionId": 129807058, "SourceDatasetVersionId": 5688399}]
[{"Id": 5688399, "DatasetId": 3270398, "DatasourceVersionId": 5763995, "CreatorUserId": 1885842, "LicenseName": "Unknown", "CreationDate": "05/15/2023 07:57:57", "VersionNumber": 1.0, "Title": "AMEX_data_sampled", "Slug": "amex-data-sampled", "Subtitle": "This is a small-sized sampled dataset from AMEX dafault prediction dataset", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3270398, "CreatorUserId": 1885842, "OwnerUserId": 1885842.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5688399.0, "CurrentDatasourceVersionId": 5763995.0, "ForumId": 3336031, "Type": 2, "CreationDate": "05/15/2023 07:57:57", "LastActivityDate": "05/15/2023", "TotalViews": 127, "TotalDownloads": 3, "TotalVotes": 8, "TotalKernels": 3}]
[{"Id": 1885842, "UserName": "kimtaehun", "DisplayName": "DataManyo", "RegisterDate": "05/05/2018", "PerformanceTier": 4}]
false
0
4,467
5
4,493
4,467
129807397
<jupyter_start><jupyter_text>University Students Complaints & Reports📝👨‍🎓
The "Voices Heard" dataset is a comprehensive collection of reports and complaints submitted by students in a university setting. From academic grievances to campus safety concerns, this dataset offers a rich trove of insights into the student experience, providing valuable feedback for university administrators and educators. With its diverse range of feedback, "Voices Heard" offers a unique opportunity to gain a better understanding of the needs and concerns of students, and to develop data-driven solutions to enhance the university experience for all.
Kaggle dataset identifier: university-students-complaints-and-reports
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import DataLoader
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from copy import deepcopy

df = pd.read_csv(
    "/kaggle/input/university-students-complaints-and-reports/Datasetprojpowerbi.csv"
)
df.head()
df.shape
df.isna().sum()
df["Genre"].value_counts()

# # Merging values into one overview
df["overview"] = (
    df["Reports"]
    + " Age: "
    + df["Age"].astype(str)
    + ". GPA: "
    + df["Gpa"].astype(str)
    + ". Year: "
    + df["Year"].astype(str)
    + " . Gender: "
    + df["Gender"].astype(str)
)
un = df["Genre"].unique()
j = 0
labels = dict()
for i in un:
    labels[i] = j
    j += 1
df["Genre"] = df["Genre"].map(labels)

# # Getting the most important features
dataset = df[["Genre", "overview"]].copy()
train, test = train_test_split(dataset.values, random_state=42, test_size=0.2)

# # Data pipeline
tokenizer = get_tokenizer("basic_english")


def yield_tokens(x):
    # tokenise the overview column of the frame that is passed in
    cat, txt = x.iloc[:, 0].values, x.iloc[:, -1].values
    for _, text in zip(cat, txt):
        yield tokenizer(text)


vocab = build_vocab_from_iterator(yield_tokens(dataset), specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"])
text_pipeline = lambda x: vocab(tokenizer(x))


def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    for _label, _text in batch:
        label_list.append(_label)
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        offsets.append(processed_text.size(0))
    label_list = torch.tensor(label_list, dtype=torch.int64)
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
    text_list = torch.cat(text_list)
    return label_list.to(device), text_list.to(device), offsets.to(device)


train_dataloader = DataLoader(
    train, batch_size=8, shuffle=True, collate_fn=collate_batch
)
validation_loader = DataLoader(
    test, batch_size=8, shuffle=False, collate_fn=collate_batch
)


# # Simple MLP-like Text Classification Model
# ![img.JPG](attachment:f25caa67-fd59-481d-9479-fe5a75cb904e.JPG)
class ComplaintClassification(torch.nn.Module):
    def __init__(self, vocab_size, embed_dim, num_class):
        super(ComplaintClassification, self).__init__()
        self.embed = torch.nn.EmbeddingBag(vocab_size, embed_dim, sparse=False)
        self.layer = torch.nn.Sequential(
            torch.nn.Linear(embed_dim, 128),
            torch.nn.Linear(128, 32),
            torch.nn.Linear(32, num_class),
        )

    def forward(self, x, off):
        x = self.embed(x, off)
        return self.layer(x)


num_classes = 11
vocab_size = len(vocab)
emsize = 64
model = ComplaintClassification(vocab_size, emsize, num_classes)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# # Optimizer is Adam and Loss is CrossEntropyLoss
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)  # note: 0.1 is a very high learning rate for Adam
criterion = torch.nn.CrossEntropyLoss()

# # Training process with validation accuracy
EPOCHS = 15
best_model = deepcopy(model)
prev = 0
for i in range(1, EPOCHS + 1):
    model.train()
    total_loss, total_count = 0, 0
    for label, text, offsets in train_dataloader:
        optimizer.zero_grad()
        output = model(text, offsets)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        total_count += 1
    print("Epoch {} training loss : {}".format(i, total_loss / total_count))
    model.eval()
    val_acc = 0
    val_count = 0
    with torch.no_grad():
        for label, text, offsets in validation_loader:
            output = model(text, offsets)
            val_acc += (output.argmax(1) == label).sum().item()
            val_count += label.size(0)
    acc = val_acc / val_count
    if acc > prev:
        best_model = deepcopy(model)
        prev = acc
    print("Epoch {} validation accuracy : {}".format(i, val_acc / val_count))


# # Testing model to predict labels for some real world scenarios
def predict(text, text_pipeline):
    with torch.no_grad():
        text = torch.tensor(text_pipeline(text)).to(device)
        output = best_model(text, torch.tensor([0]).to(device))
        return output.argmax(1).item()


reverse = dict()
for i, j in labels.items():
    reverse[j] = i
val = "I cannot pay my tuition fees, because of technical issues. Age: 21. GPA: 4.62. Year: 1 . Gender: F"
reverse[predict(val, text_pipeline)]
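# The collate function above flattens every text in a batch into one long index
# tensor plus an offsets tensor; a tiny standalone sketch (toy numbers, unrelated
# to the dataset) shows how EmbeddingBag consumes that layout:
import torch

bag = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=4, mode="mean")
flat = torch.tensor([2, 5, 7, 1, 3])  # two "texts" [2, 5, 7] and [1, 3], flattened
offsets = torch.tensor([0, 3])  # start position of each text in the flat tensor
out = bag(flat, offsets)
print(out.shape)  # torch.Size([2, 4]) -> one pooled embedding per text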
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/807/129807397.ipynb
university-students-complaints-and-reports
omarsobhy14
[{"Id": 129807397, "ScriptId": 38601153, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11036701, "CreationDate": "05/16/2023 15:46:32", "VersionNumber": 1.0, "Title": "notebookaa06c2b654", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 167.0, "LinesInsertedFromPrevious": 167.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186180442, "KernelVersionId": 129807397, "SourceDatasetVersionId": 5672268}]
[{"Id": 5672268, "DatasetId": 3260867, "DatasourceVersionId": 5747799, "CreatorUserId": 11085604, "LicenseName": "Other (specified in description)", "CreationDate": "05/12/2023 19:46:45", "VersionNumber": 1.0, "Title": "University Students Complaints & Reports\ud83d\udcdd\ud83d\udc68\u200d\ud83c\udf93", "Slug": "university-students-complaints-and-reports", "Subtitle": "Voices Heard: Unleashing Insights from Student Feedback in University", "Description": "The \"Voices Heard\" dataset is a comprehensive collection of reports and complaints submitted by students in a university setting. From academic grievances to campus safety concerns, this dataset offers a rich trove of insights into the student experience, providing valuable feedback for university administrators and educators. With its diverse range of feedback, \"Voices Heard\" offers a unique opportunity to gain a better understanding of the needs and concerns of students, and to develop data-driven solutions to enhance the university experience for all. .", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3260867, "CreatorUserId": 11085604, "OwnerUserId": 11085604.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6264732.0, "CurrentDatasourceVersionId": 6344561.0, "ForumId": 3326442, "Type": 2, "CreationDate": "05/12/2023 19:46:45", "LastActivityDate": "05/12/2023", "TotalViews": 11588, "TotalDownloads": 1576, "TotalVotes": 43, "TotalKernels": 8}]
[{"Id": 11085604, "UserName": "omarsobhy14", "DisplayName": "Omar Sobhy", "RegisterDate": "07/19/2022", "PerformanceTier": 2}]
false
1
1,457
0
1,612
1,457
129993231
<jupyter_start><jupyter_text>27 Class Sign Language Dataset
### Abstract
To contribute to the development of technologies that can reduce the communication problems of speech-impaired persons, a new dataset was presented with this work. The dataset was created by processing American Sign Language-based photographs collected from 173 volunteer individuals.
Details of the dataset are published in the paper: [Mavi. A., and Dikle, Z. (2022). A New 27 Class Sign Language Dataset Collected from 173 Individuals. *arXiv:2203.03859*](https://arxiv.org/abs/2203.03859)
<img src="https://raw.githubusercontent.com/ardamavi/Vocalize-Sign-Language/master/Assets/Samples_Kaggle_Image.png">
### Data Usage:
Data can be used for research and/or commercial purposes by citing the original paper below.
### Cite as:
Mavi. A., and Dikle, Z. (2022). A New 27 Class Sign Language Dataset Collected from 173 Individuals. [*arXiv:2203.03859*](https://arxiv.org/abs/2203.03859)
Kaggle dataset identifier: 27-class-sign-language-dataset
<jupyter_script>import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_hub as tf_hub
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

my_seed = 42
split_proportion = 0.1
input_dir = os.path.join("..", "input", "27-class-sign-language-dataset")
x_filename = os.path.join(input_dir, "X.npy")
y_filename = os.path.join(input_dir, "Y.npy")
x = np.load(x_filename)
y = np.load(y_filename)

# shuffle and split the data; re-seeding with the same seed gives x and y
# the identical permutation, so images and labels stay aligned
split_number = int(split_proportion * x.shape[0])
np.random.seed(my_seed)
np.random.shuffle(x)
val_x = tf.convert_to_tensor(x[:split_number])
test_x = tf.convert_to_tensor(x[split_number : 2 * split_number])
train_x = tf.convert_to_tensor(x[2 * split_number :])
np.random.seed(my_seed)
np.random.shuffle(y)
val_y = tf.convert_to_tensor(y[:split_number])
test_y = tf.convert_to_tensor(y[split_number : 2 * split_number])
train_y = tf.convert_to_tensor(y[2 * split_number :])

# visualize images with labels
plt.figure()
for count, x_index in enumerate(np.random.randint(0, train_x.shape[0], size=(9,))):
    plt.subplot(3, 3, count + 1)
    plt.imshow(train_x[x_index])
    plt.title(f"label: {train_y[x_index]}")
plt.tight_layout()
plt.show()

label_dict = {}
for number, label in enumerate(np.unique(train_y)):
    label_dict[number] = label
print(label_dict, x.shape)

# help(tf_hub.KerasLayer)
# model_url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
# model_url = "https://tfhub.dev/google/imagenet/mobilenet_v2_050_96/classification/5"
# extractor = tf_hub.KerasLayer(model_url)
# extractor.build([None, 128, 128, 3])
# feature_extractor.trainable = False
extractor = tf.keras.applications.MobileNetV3Small(
    input_shape=train_x.shape[1:], include_top=False, weights="imagenet"
)
number_classes = len(label_dict.keys())
model = Sequential(
    [
        extractor,
        tf.keras.layers.GlobalAveragePooling2D(),
        Dense(number_classes, activation=None),
    ]
)
# model.build([None, 128, 128, 3])
_ = model(train_x[0:1])
model.summary()
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

# encode the string labels as integer ids for SparseCategoricalCrossentropy
label_to_index = {label: number for number, label in label_dict.items()}
train_y = tf.convert_to_tensor(
    [label_to_index[label] for label in train_y.numpy().reshape(-1)]
)
val_y = tf.convert_to_tensor(
    [label_to_index[label] for label in val_y.numpy().reshape(-1)]
)
train_y.shape
_ = model(train_x[0:3])
_.shape, train_y[0:3].shape
history = model.fit(
    x=train_x,
    y=train_y,
    batch_size=32,
    epochs=100,
    validation_data=(val_x, val_y),
)
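# Re-seeding NumPy before each shuffle (as above) only keeps x and y aligned
# because both arrays receive the identical permutation. A single explicit
# permutation index makes the pairing more robust; a small self-contained sketch
# with toy arrays:
import numpy as np

x_toy = np.arange(10).reshape(5, 2)  # toy "images"
y_toy = np.array(["a", "b", "c", "d", "e"])  # matching labels

rng = np.random.default_rng(42)
idx = rng.permutation(len(x_toy))  # one permutation shared by both arrays
x_shuffled, y_shuffled = x_toy[idx], y_toy[idx]
print(y_shuffled)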
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/993/129993231.ipynb
27-class-sign-language-dataset
ardamavi
[{"Id": 129993231, "ScriptId": 38667207, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1950998, "CreationDate": "05/18/2023 02:06:57", "VersionNumber": 1.0, "Title": "tf_lite_sign", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 112.0, "LinesInsertedFromPrevious": 112.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186442972, "KernelVersionId": 129993231, "SourceDatasetVersionId": 3263022}]
[{"Id": 3263022, "DatasetId": 1976854, "DatasourceVersionId": 3313436, "CreatorUserId": 1084733, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "03/06/2022 18:52:55", "VersionNumber": 1.0, "Title": "27 Class Sign Language Dataset", "Slug": "27-class-sign-language-dataset", "Subtitle": "27 Class ASL-Based Sign Language Dataset Collected from 173 Individuals", "Description": "### Abstract\nTo contribute to the development of technologies, that can reduce the communication problems of speech-impaired persons, a new dataset was presented with this work. The dataset was created by processing American Sign Language-based photographs collected from 173 volunteer individuals.\nDetails of the dataset are published in the paper: [Mavi. A., and Dikle, Z. (2022). A New 27 Class Sign Language Dataset Collected from 173 Individuals. *arXiv:2203.03859*](https://arxiv.org/abs/2203.03859)\n\n<img src=\"https://raw.githubusercontent.com/ardamavi/Vocalize-Sign-Language/master/Assets/Samples_Kaggle_Image.png\">\n\n### Data Usage:\nData can be used for research and/or commercial purposes by citing to the original paper below.\n\n### Cite as:\nMavi. A., and Dikle, Z. (2022). A New 27 Class Sign Language Dataset Collected from 173 Individuals. [*arXiv:2203.03859*](https://arxiv.org/abs/2203.03859)\n\n### Acknowledgements\nThanks to all of the volunteer students and teachers from Ayranc\u0131 Anadolu High School for their help in collecting data.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1976854, "CreatorUserId": 1084733, "OwnerUserId": 1084733.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3263022.0, "CurrentDatasourceVersionId": 3313436.0, "ForumId": 2001121, "Type": 2, "CreationDate": "03/06/2022 18:52:55", "LastActivityDate": "03/06/2022", "TotalViews": 7540, "TotalDownloads": 608, "TotalVotes": 21, "TotalKernels": 1}]
[{"Id": 1084733, "UserName": "ardamavi", "DisplayName": "Arda Mavi", "RegisterDate": "05/21/2017", "PerformanceTier": 0}]
0
1,014
0
1,331
1,014
129993531
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import os
from matplotlib import pyplot as plt
import torch
import torchvision
from torch import nn as nn
from torch.nn import functional as F
from torch import optim as optim
from torch.utils.data import DataLoader, Dataset, TensorDataset
from torchvision import transforms
from torchvision.transforms import Resize, ToTensor, Compose, Normalize
import random
from torchvision import datasets
import warnings
from sklearn.model_selection import train_test_split

warnings.filterwarnings("ignore")

# Load every CSV in the competition folder into a DataFrame named after the file
# (this creates the variables `train`, `test` and `sample_submission`)
for f in os.listdir("/kaggle/input/digit-recognizer"):
    s = f.split(".")[0]
    print(s)
    try:
        exec(s + " = pd.read_csv('/kaggle/input/digit-recognizer/{}')".format(f))
    except:
        pass

train.head()

train_X, val_X, train_y, val_y = train_test_split(
    train.drop("label", axis=1), train.label, test_size=0.2
)
train_X.reset_index(drop=True, inplace=True)
train_y.reset_index(drop=True, inplace=True)
val_X.reset_index(drop=True, inplace=True)
val_y.reset_index(drop=True, inplace=True)

transform = Compose(
    [
        transforms.ToPILImage(),
        # Resize(size = (28,28)),
        # transforms.RandomCrop(28),
        # transforms.RandomHorizontalFlip(),
        ToTensor(),
        Normalize((0.131,), (0.3085,)),
    ]
)


class MNISTDataSet(Dataset):
    def __init__(self, images, labels, transforms=None):
        self.X = images
        self.y = labels
        self.transforms = transforms

    def __len__(self):
        return len(self.X)

    def __getitem__(self, i):
        data = self.X.iloc[i, :]
        data = np.array(data).astype(np.uint8).reshape(28, 28, 1)
        if self.transforms:
            data = self.transforms(data)
        if self.y is not None:  # train/val
            return (data, self.y[i])
        else:
            return data


train_set = MNISTDataSet(train_X, train_y, transform)
trainload = DataLoader(train_set, batch_size=32, shuffle=True)
val_set = MNISTDataSet(val_X, val_y, transform)
valload = DataLoader(val_set, batch_size=32, shuffle=True)


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.cn1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=2),
            nn.BatchNorm2d(16),
        )
        self.cn2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=2),
            nn.BatchNorm2d(32),
        )
        self.cn3 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=2),
            nn.BatchNorm2d(64),
        )
        self.dp1 = nn.Dropout(0.1)
        self.dp2 = nn.Dropout(0.25)
        self.dp3 = nn.Dropout(0.4)
        self.fc1 = nn.Sequential(nn.Linear(1600, 1024), nn.BatchNorm1d(1024))
        self.fc2 = nn.Sequential(nn.Linear(1024, 512), nn.BatchNorm1d(512))
        self.fc3 = nn.Sequential(nn.Linear(512, 128), nn.BatchNorm1d(128))
        self.fc4 = nn.Sequential(nn.Linear(128, 64), nn.BatchNorm1d(64))
        self.fc5 = nn.Linear(64, 10)

    def forward(self, x):
        x = self.cn1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.cn2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.dp1(x)
        x = self.cn3(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dp2(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        x = F.relu(x)
        x = self.dp3(x)
        x = self.fc4(x)
        x = F.relu(x)
        x = self.fc5(x)
        digits = F.log_softmax(x, dim=1)
        return digits


def get_num_correct(preds, labels):
    return preds.argmax(dim=1).eq(labels).sum().item()


def get_mean_and_std(dataloader):
    channels_sum, channels_squared_sum, num_batches = 0, 0, 0
    for data, _ in dataloader:
        # Mean over batch, height and width, but not over the channels
        channels_sum += torch.mean(data, dim=[0, 2, 3])
        channels_squared_sum += torch.mean(data**2, dim=[0, 2, 3])
        num_batches += 1
    mean = channels_sum / num_batches
    # std = sqrt(E[X^2] - (E[X])^2)
    std = (channels_squared_sum / num_batches - mean**2) ** 0.5
    return mean, std


get_mean_and_std(trainload)


def train(model, device, train_dataloader, val_dataloader, optimizer, epoch):
    model.train()
    for b_i, (X, y) in enumerate(train_dataloader):
        X, y = X.to(device), y.to(device)
        optimizer.zero_grad()
        pred_prob = model(X)
        loss = F.nll_loss(pred_prob, y)
        loss.backward()
        optimizer.step()
    train_loss = loss.item()  # loss of the last training batch

    model.eval()  # eval mode
    val_loss = 0
    val_correct = 0
    with torch.no_grad():
        for X, y in val_dataloader:
            X, y = X.to(device), y.to(device)
            preds = model(X)  # get predictions
            # sum the per-sample losses so the average is over the whole validation set
            # (the original overwrote val_loss with `loss.item() * 32` on every batch)
            val_loss += F.nll_loss(preds, y, reduction="sum").item()
            val_correct += get_num_correct(preds, y)
    val_loss /= len(val_X)
    print(
        "epoch: {}\t loss: {:.6f}\t val loss: {:.6f}\t accuracy: {:.2f}".format(
            epoch + 1, train_loss, val_loss, (val_correct / len(val_X)) * 100
        )
    )


EPOCH = 5
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model = CNN().to(device)
# Adadelta is the optimizer (the original code named it `critetion`, a typo for criterion)
optimizer = optim.Adadelta(model.parameters(), lr=0.05)
print(device)
for e in range(EPOCH):
    train(model, device, trainload, valload, optimizer, e)

test.reset_index(drop=True, inplace=True)
test_set = MNISTDataSet(test, None, transform)
testload = DataLoader(test_set, batch_size=32, shuffle=False)

model.eval()  # Safety first
predictions = torch.LongTensor().to(device)  # Tensor for all predictions
# Go through the test set, saving the predictions in... 'predictions'
with torch.no_grad():
    for images in testload:
        preds = model(images.to(device))
        predictions = torch.cat((predictions, preds.argmax(dim=1)), dim=0)

sample_submission["Label"] = predictions.cpu().numpy()
sample_submission.to_csv("submission.csv", index=False)
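# A note on the Normalize((0.131,), (0.3085,)) constants used above: they can be
# reproduced with get_mean_and_std(). A minimal sketch (the `plain_*` names are
# illustrative and not part of the original notebook): compute the stats on an
# un-normalized loader first, then plug them into the real transform.
plain_set = MNISTDataSet(
    train_X, train_y, Compose([transforms.ToPILImage(), ToTensor()])
)
plain_load = DataLoader(plain_set, batch_size=32, shuffle=False)
mean, std = get_mean_and_std(plain_load)  # roughly tensor([0.131]), tensor([0.3085])
transform_checked = Compose(
    [
        transforms.ToPILImage(),
        ToTensor(),
        Normalize(tuple(mean.tolist()), tuple(std.tolist())),
    ]
)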
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/993/129993531.ipynb
null
null
[{"Id": 129993531, "ScriptId": 38649575, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 847705, "CreationDate": "05/18/2023 02:10:44", "VersionNumber": 1.0, "Title": "MNIST CNN PyTorch - 99,1%", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 204.0, "LinesInsertedFromPrevious": 204.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,217
0
2,217
2,217
129870801
<jupyter_start><jupyter_text>AMEX_data_sampled Kaggle dataset identifier: amex-data-sampled <jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# ### Lecture 4) Building a credit scoring model with the TOAD library
# > ### Data preparation
import warnings

warnings.filterwarnings("ignore", module="sklearn.metrics.cluster")


def drop_null_cols(df, threshold=0.8):
    """
    Drop columns whose missing-value ratio is greater than or equal to `threshold`.
    """
    null_percent = df.isnull().mean()
    drop_cols = list(null_percent[null_percent >= threshold].index)
    df = df.drop(drop_cols, axis=1)
    print(f"Dropped {len(drop_cols)} columns: {', '.join(drop_cols)}")
    return df


df = pd.read_pickle("/kaggle/input/amex-data-sampled/train_df_sample.pkl")
df = df.reset_index()

import hashlib


def encode_customer_id(id_str):
    encoded_id = hashlib.sha256(id_str.encode("utf-8")).hexdigest()[:16]
    return encoded_id


df["customer_ID"] = df["customer_ID"].apply(encode_customer_id)
df = drop_null_cols(df)
cat_features = [
    "B_30",
    "B_38",
    "D_114",
    "D_116",
    "D_117",
    "D_120",
    "D_126",
    "D_63",
    "D_64",
    "D_68",
]
cat_features = [f"{cf}_last" for cf in cat_features]
import random

num_cols = df.select_dtypes(include=np.number).columns.tolist()
num_cols = [col for col in num_cols if "target" not in col and col not in cat_features]
num_cols_sample = random.sample([col for col in num_cols if "target" not in col], 100)
feature_list = num_cols_sample + cat_features
all_list = feature_list + ["target"]
df = df[all_list]
for categorical_feature in cat_features:
    if df[categorical_feature].dtype == "float16":
        df[categorical_feature] = df[categorical_feature].astype(str)
    if df[categorical_feature].dtype == "category":
        df[categorical_feature] = df[categorical_feature].astype(str)
    elif df[categorical_feature].dtype == "object":
        df[categorical_feature] = df[categorical_feature].astype(str)

from sklearn.preprocessing import LabelEncoder

le_encoder = LabelEncoder()
for categorical_feature in cat_features:
    df[categorical_feature].fillna(value="NaN", inplace=True)
    df[categorical_feature] = le_encoder.fit_transform(df[categorical_feature])

from sklearn.impute import SimpleImputer


def impute_nan(df, num_cols, strategy="mean"):
    """
    Impute NaN values in `num_cols` according to `strategy`.

    :param df: DataFrame
    :param num_cols: list of numeric columns to impute
    :param strategy: str, imputation strategy (default: 'mean')
    :return: DataFrame with imputed values
    """
    imputer = SimpleImputer(strategy=strategy)
    df[num_cols] = imputer.fit_transform(df[num_cols])
    return df


df = impute_nan(df, num_cols_sample, strategy="mean")

# > #### Install the toad package
# > #### Let's select features based on their IV values (Feature Selection)
import toad

# Define the target variable
target = "target"
# Compute Information Value (IV) and select features
iv_df = toad.quality(df, target=target, iv_only=True)
selected_features = iv_df[iv_df["iv"] > 0.1].index  # use 'index' instead of 'name'
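# For intuition, the IV that toad.quality() reports can be approximated by hand
# for one feature. A rough sketch (the 10-quantile binning and the manual_iv
# helper are illustrative only, and a binary 0/1 target is assumed):
def manual_iv(series, target, bins=10):
    b = pd.qcut(series, bins, duplicates="drop")
    grouped = pd.crosstab(b, target)
    good = grouped[0] / grouped[0].sum()  # share of non-events in each bin
    bad = grouped[1] / grouped[1].sum()  # share of events in each bin
    woe = np.log((bad + 1e-6) / (good + 1e-6))  # small epsilon avoids log(0)
    return ((bad - good) * woe).sum()


print(manual_iv(df[num_cols_sample[0]], df["target"]))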
# WOE transformation
trans = toad.transform.WOETransformer()
df_woe = trans.fit_transform(df[selected_features], df[target])
# df_woe can now be used for further modelling.
# toad is an open-source Python package for data preprocessing and exploration. Its main features are:
# Data quality assessment: toad.quality() computes the Information Value (IV) of each variable and evaluates its relationship with the target. This helps judge variable importance and drives feature selection.
# Data transformation: toad.transform.WOETransformer() applies the WOE (Weight of Evidence) transformation. WOE bins categorical or continuous variables and measures their information content, which can improve the predictive power of the variables and overall model performance.
df_woe

# > #### Run binning on the selected features. This time we do it slightly differently: each bin is required to hold at least 5% of the data.
combiner = toad.transform.Combiner()
combiner.fit(df[selected_features], y=df[target], method="chi", min_samples=0.05)
# With min_samples = 0.05, each bin is guaranteed at least 5% of the data.
# Let's look at the binning result.
binning_result = combiner.export()

# > #### iv=True is a parameter of the toad.plot functions; setting it to True displays the Information Value (IV) on the chart.
toad.detect(df)[:10]
# > #### toad.detect(df) returns a dataframe with each variable's name and data type; the output shows the first 10 variables and their types.
toad.quality(df, "target", iv_only=True)[:15]
# > #### Let's look at the IV value of each variable.

# #### Feature Selection
# 💡 Notes:
# * empty=0.9: features whose missing-value ratio exceeds 90% are filtered out.
# * iv=0.02: features with an Information Value (IV) below 0.02 are removed.
# * corr=0.7: when the Pearson correlation between two or more features exceeds 0.7, the features with lower IV are removed.
# * return_drop=False: when set to True, the function also returns the list of dropped columns.
# * exclude=None: a list of features to exclude from the algorithm, typically ID or month columns.
train_selected, dropped = toad.selection.select(
    df,
    target="target",
    empty=0.5,
    iv=0.05,
    corr=0.7,
    return_drop=True,
    exclude=["D_117_last"],
)
print(dropped)
print(train_selected.shape)
# > #### toad.selection.select returns train_selected, the training set restricted to the selected features, and dropped, the list of removed columns. train_selected.shape prints the size of the resulting training set.

# initialise
c = toad.transform.Combiner()
# Train binning with the selected features from previous; use reliable Chi-squared binning, and control that each bucket has at least 5% sample.
c.fit(
    train_selected, y="target", method="chi", min_samples=0.05, exclude=["D_117_last"]
)
print("D_59_min:", c.export()["D_59_min"])
print("R_15_std:", c.export()["R_15_std"])
print("S_3_last:", c.export()["S_3_last"])
# > ##### The code above uses c.export() to print the binning information of specific variables.
# c.export() returns the binning result of the Combiner object c as a dictionary. The keys are the variable names; each value holds that variable's binning information.
# The binning information contains the variable's bins and their labels, so the printed output lets you inspect each variable's binning result.

from toad.plot import bin_plot

# Check the bin result of the 'D_59_min' variable in the training data (train_selected).
col = "D_59_min"
# Setting 'labels = True' is recommended for visualization.
bin_plot(
    c.transform(train_selected[[col, "target"]], labels=True), x=col, target="target"
)
# > ##### The code above visualizes the binning result of the 'D_59_min' variable with the bin_plot function.
# > ##### c.transform(train_selected[[col, 'target']], labels=True): selects the 'D_59_min' variable and the target variable ('target') from the training data (train_selected) and produces the binning result. labels=True returns the result with bin labels included.
# > ##### bin_plot(...): visualizes the binning result. The x-axis shows the values of 'D_59_min', and the distribution of each bin can be inspected against the target variable ('target'). Visualizing the bins this way makes patterns in the data easy to spot.

# Check the bin result of the 'S_3_last' variable in the training data (train_selected).
col = "S_3_last"
# For categorical variables it is recommended to set 'labels = True'.
bin_plot(
    c.transform(train_selected[[col, "target"]], labels=True), x=col, target="target"
)
# Toad's binning supports both categorical and numerical variables.
"toad.transform.Combiner()" 클래스를 사용하여 학습하며, 절차는 다음과 같습니다: # * 초기화(initalise) : c = toad.transform.Combiner() # * *train binning*: c.fit(dataframe, y='target', method='chi', min_samples=None, n_bins=None, empty_separate=False) # * y: 목표 변수; # * method: binning에 적용할 방법. 'chi' (카이제곱), 'dt' (의사결정 트리), 'kmeans' (K-means), 'quantile' (동일한 백분위수 기준), 'step' (동일한 간격)을 지원합니다. # * min_samples: 샘플당 요구되는 최소 수 또는 비율. 각 버킷에 필요한 최소 샘플 수 / 비율입니다. # * n_bins: 최소한의 버킷 수. 수가 너무 큰 경우, 알고리즘은 얻을 수 있는 최대 버킷 수를 반환합니다. # * empty_separate: 누락된 값이 버킷 내에 별도로 분리되는지 여부. False인 경우, 누락된 값은 가장 가까운 나쁜 비율 버킷과 함께 배치됩니다. # * binning 결과: c.export() # * bins 조정: c.update(dict) # * bins 적용 및 이산값으로 변환: c.transform(dataframe, labels=False) # * labels: 데이터를 설명 라벨로 변환할지 여부. False인 경우 0, 1, 2...로 반환됩니다. 범주형 변수는 비율의 내림차순으로 정렬됩니다. True인 경우 (-무한대, 0], (0, 10], (10, 무한대)와 같이 반환됩니다. # * 참고: 1. 불필요한 열을 제외하는 것을 잊지 마세요. 특히 ID 열과 타임스탬프 열은 제외해야 합니다.. 2. 고유 값이 많은 열은 학습에 많은 시간이 소요될 수 있습니다.* # 초기화 transer = toad.transform.WOETransformer() # transer.fit_transform() 및 combiner.transform()을 적용합니다. target을 제외하도록 주의하세요. train_woe = transer.fit_transform( c.transform(train_selected), train_selected["target"], exclude="target" ) train_woe.head(3) # * WOETransformer를 초기화합니다. # * combiner.transform()의 결과에 transer.fit_transform()을 적용합니다. # * target을 제외하도록 주의하세요. # * 변환된 train_woe 데이터프레임의 처음 3개 행을 출력합니다. col = train_woe.columns.tolist()[:-1] col.remove("D_117_last") # > #### 예시를 위해 제거했던 변수를 제거해줍니다. # > #### 예시를 위해 간단한 로지스틱 회귀 모델을 적합해봅니다. from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(train_woe[col], train_woe["target"]) # 훈련 데이터와 Out-of-Time (OOT) 데이터에 대해 예측된 확률을 구합니다. pred_train = lr.predict_proba(train_woe[col])[:, 1] # > #### Toad 역시 쉽게 KS 통계량과 AUC를 계산할 수 있는 라이브러리를 제공합니다. from toad.metrics import KS, AUC print("train KS", KS(pred_train, train_woe["target"])) print("train AUC", AUC(pred_train, train_woe["target"])) # > #### Toad에서도 OptBinning과 마찬가지로 스코어링을 할 수 있는 스코어 카드 기능을 제공합니다. card = toad.ScoreCard( combiner=c, transer=transer, C=0.1, base_score=600, base_odds=35, pdo=60, rate=2 ) card.fit(train_woe[col], train_woe["target"]) # # 💡 Note!: # * toad.ScoreCard는 Scorecard 모델을 생성하기 위한 클래스입니다. # * combiner는 binning 결과를 담고 있는 toad.transform.Combiner 객체를 전달합니다. # * transer는 WOE 변환을 담당하는 toad.transform.WOETransformer 객체를 전달합니다. # * C는 Logistic Regression에서의 규제 강도를 나타내는 매개변수입니다. # * base_score는 기준 스코어로, 기본적으로 모든 변수의 WOE 값이 0일 때의 스코어입니다. # * base_odds는 기준 오즈로, 기본적으로 모든 변수의 WOE 값이 0일 때의 오즈입니다. # * pdo는 Point to Double the Odds로, 오즈를 두배로 만들기 위해 필요한 점수의 차이입니다. # * rate는 모델의 점수 스케일을 조절하기 위한 비율입니다. # * fit() 메서드를 사용하여 Scorecard 모델을 훈련시킵니다. train_woe[col]은 독립 변수를, train_woe['target']은 종속 변수를 나타냅니다. sample_train_woe = train_woe.sample(3) score_sample = card.predict(sample_train_woe[col]) score_sample_rounded = score_sample.round().astype(int) for i, score in enumerate(score_sample_rounded, start=1): print(f"{i}번째 고객의 점수는 \033[1;34m{score}\033[0m점 입니다.") # # 💡 Note: # * 1. `sample_train_woe = train_woe.sample(3)`: `sample()` 함수를 사용하여 `train_woe` 데이터셋에서 임의로 3개의 샘플을 추출합니다. 이 샘플들은 점수를 계산할 데이터입니다. # * 2. `score_sample = card.predict(sample_train_woe[col])`: `card` 라는 ScoreCard 객체의 `predict()` 메서드를 사용하여 위에서 추출한 샘플에 대한 점수를 계산합니다. 이 때, `sample_train_woe[col]`을 통해 샘플의 해당 컬럼(변수)만 사용하게 됩니다. # * 3. `score_sample_rounded = score_sample.round().astype(int)`: 계산된 점수는 일반적으로 소수점 형태로 반환됩니다. 이 줄의 코드는 `round()` 함수를 사용하여 계산된 점수를 반올림하고, `astype(int)`를 통해 정수형으로 변환합니다. # * 4. 
# * 4. `for i, score in enumerate(score_sample_rounded, start=1)`: loops over each sample's score in order. `enumerate()` takes an iterable (here `score_sample_rounded`) and yields the index (i) together with the corresponding value (score); `start=1` makes the index begin at 1.
# * 5. `print(f"Customer {i}'s score is \033[1;34m{score}\033[0m points.")`: uses string formatting to print each sample's score nicely; the text between `\033[1;34m` and `\033[0m` is printed in blue.
card.export()
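# How base_score / base_odds / pdo map a probability to points. This is the
# conventional scorecard formula, shown for intuition only (toad's internals may
# differ in detail, and prob_to_score is not part of the original notebook):
import math


def prob_to_score(p, base_score=600, base_odds=35, pdo=60):
    factor = pdo / math.log(2)  # points added whenever the odds double
    offset = base_score - factor * math.log(base_odds)
    odds = (1 - p) / p  # good:bad odds when p is the predicted P(bad)
    return offset + factor * math.log(odds)


print(round(prob_to_score(0.02)))  # a low default probability maps to a high score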
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/870/129870801.ipynb
amex-data-sampled
kimtaehun
[{"Id": 129870801, "ScriptId": 38605996, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13683894, "CreationDate": "05/17/2023 05:09:54", "VersionNumber": 1.0, "Title": "4\uac15) TOAD \ub77c\uc774\ube0c\ub7ec\ub9ac\ub97c \ud65c\uc6a9\ud55c \uc2e0\uc6a9\ud3c9\uac00 \ubaa8\ub378 \uac1c\ubc1c", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 329.0, "LinesInsertedFromPrevious": 329.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
[{"Id": 186271118, "KernelVersionId": 129870801, "SourceDatasetVersionId": 5688399}]
[{"Id": 5688399, "DatasetId": 3270398, "DatasourceVersionId": 5763995, "CreatorUserId": 1885842, "LicenseName": "Unknown", "CreationDate": "05/15/2023 07:57:57", "VersionNumber": 1.0, "Title": "AMEX_data_sampled", "Slug": "amex-data-sampled", "Subtitle": "This is a small-sized sampled dataset from AMEX dafault prediction dataset", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3270398, "CreatorUserId": 1885842, "OwnerUserId": 1885842.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5688399.0, "CurrentDatasourceVersionId": 5763995.0, "ForumId": 3336031, "Type": 2, "CreationDate": "05/15/2023 07:57:57", "LastActivityDate": "05/15/2023", "TotalViews": 127, "TotalDownloads": 3, "TotalVotes": 8, "TotalKernels": 3}]
[{"Id": 1885842, "UserName": "kimtaehun", "DisplayName": "DataManyo", "RegisterDate": "05/05/2018", "PerformanceTier": 4}]
false
0
4,837
3
4,863
4,837
129870816
<jupyter_start><jupyter_text>Adult Census Income
This data was extracted from the [1994 Census bureau database][1] by Ronny Kohavi and Barry Becker (Data Mining and Visualization, Silicon Graphics). A set of reasonably clean records was extracted using the following conditions: ((AAGE>16) && (AGI>100) && (AFNLWGT>1) && (HRSWK>0)). *The prediction task is to determine whether a person makes over $50K a year*.

## Description of fnlwgt (final weight)
The weights on the Current Population Survey (CPS) files are controlled to independent estimates of the civilian noninstitutional population of the US. These are prepared monthly for us by Population Division here at the Census Bureau. We use 3 sets of controls. These are:
1. A single cell estimate of the population 16+ for each state.
2. Controls for Hispanic Origin by age and sex.
3. Controls by Race, age and sex.

We use all three sets of controls in our weighting program and "rake" through them 6 times so that by the end we come back to all the controls we used. The term estimate refers to population totals derived from CPS by creating "weighted tallies" of any specified socio-economic characteristics of the population. People with similar demographic characteristics should have similar weights. There is one important caveat to remember about this statement. That is that since the CPS sample is actually a collection of 51 state samples, each with its own probability of selection, the statement only applies within state.

## Relevant papers
Ron Kohavi, ["Scaling Up the Accuracy of Naive-Bayes Classifiers: a Decision-Tree Hybrid"][2], *Proceedings of the Second International Conference on Knowledge Discovery and Data Mining*, 1996. (PDF)
[1]: http://www.census.gov/en.html
[2]: http://robotics.stanford.edu/~ronnyk/nbtree.pdf
Kaggle dataset identifier: adult-census-income <jupyter_script>import pandas as pd
import matplotlib.pyplot as plt

data = pd.read_csv(r"/kaggle/input/adult-census-income/adult.csv")
data
# marital.status : marital status
data.keys()
txt_col = [
    "workclass",
    "education",
    "marital.status",
    "occupation",
    "relationship",
    "race",
    "sex",
    "native.country",
    "income",
]
# Collapse 'Wife' and 'Husband' into a single 'married' category
data["relationship_change"] = data["relationship"].replace(
    ["Wife", "Husband"], "married"
)
# data['relationship_change'] = data['relationship'].replace('Husband','married')
data["relationship_change"].value_counts()
data
# Columns that contain '?' placeholder values
with_question = ["workclass", "occupation", "native.country"]
# plt.hist(data['fnlwgt'])
# plt.figure()
data.hist(figsize=(10, 10))
for i in txt_col:
    ed = data[i].value_counts()
    ed_pd = pd.DataFrame(ed)
    plt.figure(figsize=(20, 15))
    plt.bar(ed_pd.index, height=ed_pd[i])
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/870/129870816.ipynb
adult-census-income
null
[{"Id": 129870816, "ScriptId": 38625846, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8215644, "CreationDate": "05/17/2023 05:10:11", "VersionNumber": 1.0, "Title": "Adult income", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 40.0, "LinesInsertedFromPrevious": 40.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186271153, "KernelVersionId": 129870816, "SourceDatasetVersionId": 498}]
[{"Id": 498, "DatasetId": 225, "DatasourceVersionId": 498, "CreatorUserId": 495305, "LicenseName": "CC0: Public Domain", "CreationDate": "10/07/2016 23:42:59", "VersionNumber": 3.0, "Title": "Adult Census Income", "Slug": "adult-census-income", "Subtitle": "Predict whether income exceeds $50K/yr based on census data", "Description": "This data was extracted from the [1994 Census bureau database][1] by Ronny Kohavi and Barry Becker (Data Mining and Visualization, Silicon Graphics). A set of reasonably clean records was extracted using the following conditions: ((AAGE>16) && (AGI>100) && (AFNLWGT>1) && (HRSWK>0)). *The prediction task is to determine whether a person makes over $50K a year*.\n\n## Description of fnlwgt (final weight)\n\nThe weights on the Current Population Survey (CPS) files are controlled to independent estimates of the civilian noninstitutional population of the US. These are prepared monthly for us by Population Division here at the Census Bureau. We use 3 sets of controls. These are: \n\n 1. A single cell estimate of the population 16+ for each state.\n \n 2. Controls for Hispanic Origin by age and sex.\n\n 3. Controls by Race, age and sex.\n\nWe use all three sets of controls in our weighting program and \"rake\" through them 6 times so that by the end we come back to all the controls we used. The term estimate refers to population totals derived from CPS by creating \"weighted tallies\" of any specified socio-economic characteristics of the population. People with similar demographic characteristics should have similar weights. There is one important caveat to remember about this statement. That is that since the CPS sample is actually a collection of 51 state samples, each with its own probability of selection, the statement only applies within state.\n\n##Relevant papers\n\nRon Kohavi, [\"Scaling Up the Accuracy of Naive-Bayes Classifiers: a Decision-Tree Hybrid\"][2], *Proceedings of the Second International Conference on Knowledge Discovery and Data Mining*, 1996. (PDF)\n\n [1]: http://www.census.gov/en.html\n [2]: http://robotics.stanford.edu/~ronnyk/nbtree.pdf", "VersionNotes": "Removed leading whitespace everywhere", "TotalCompressedBytes": 4104734.0, "TotalUncompressedBytes": 4104734.0}]
[{"Id": 225, "CreatorUserId": 495305, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 498.0, "CurrentDatasourceVersionId": 498.0, "ForumId": 1649, "Type": 2, "CreationDate": "10/06/2016 17:19:07", "LastActivityDate": "02/05/2018", "TotalViews": 592007, "TotalDownloads": 52622, "TotalVotes": 593, "TotalKernels": 478}]
null
false
0
295
0
777
295
129870360
# # Import Libraries and set paths
import numpy as np
import glob
import PIL.Image as Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from tqdm import tqdm
from io import StringIO
from sklearn.metrics import fbeta_score
from skimage.util import view_as_windows
from scipy.ndimage import distance_transform_edt
from numba import jit

# Constants
PREFIX = "/kaggle/input/vesuvius-challenge-ink-detection/train/3/"

# Load mask image
mask = np.array(Image.open(PREFIX + "mask.png").convert("1"))
# Load label image
label = (np.array(Image.open(PREFIX + "inklabels.png")) > 0).astype(np.float32)
# Load infrared image
ir = np.array(Image.open(PREFIX + "ir.png"))
# Load the 3D x-ray scan, one slice at a time
images = [
    np.array(Image.open(filename))
    for filename in tqdm(sorted(glob.glob(PREFIX + "surface_volume/*.tif")))
]


@jit(nopython=True)
def get_value_ink_ratio(value_count_ink, value_count_all, a, label):
    for v, l in zip(a.ravel(), label.ravel()):
        value_count_all[v] += 1
        if l:
            value_count_ink[v] += 1


def plot_ink_ratio(value_count_ink, value_count_all, a):
    value_ink_ratio = np.where(
        value_count_all == 0, 0, value_count_ink / value_count_all
    )
    x = np.arange(len(value_ink_ratio))
    # plot ink ratio distribution
    fig, ax = plt.subplots(1, 1, figsize=(14, 2))
    ax.plot(x, value_ink_ratio, linestyle="", marker=".")
    plt.show()
    # select the pixel values whose ink ratio clears a threshold
    sorted_by_ink = np.argsort(value_ink_ratio)
    sorted_ink_ratio = value_ink_ratio[sorted_by_ink]
    # total number of ink pixels, i.e. tp + fn (the original used
    # value_count_all.sum(), which counts all pixels rather than ink pixels)
    truth = value_count_ink.sum()
    f05 = np.zeros(101)
    best_f05 = 0
    best_th = 0
    for th in range(101):
        high_ink_ratio = sorted_by_ink[sorted_ink_ratio > th / 100]
        tp = value_count_ink[high_ink_ratio].sum()
        fp = value_count_all[high_ink_ratio].sum() - tp
        fn = truth - tp
        f05[th] = 1.25 * tp / (1.25 * tp + fp + 0.25 * fn)
        if best_f05 < f05[th]:
            best_th = th / 100
            best_f05 = f05[th]
    fig, ax = plt.subplots(1, 1, figsize=(14, 2))
    ax.plot(np.arange(101) / 100, f05, linestyle="", marker=".")
    plt.show()
    high_ink_ratio = sorted_by_ink[sorted_ink_ratio > 0.15]
    high_ink = np.isin(a, high_ink_ratio)
    print("Number of high ink ratio values:", high_ink.shape, best_th, best_f05)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 7))
    ax1.imshow(high_ink, cmap="gray")
    ax2.imshow(label, cmap="gray")
    plt.show()


NUM_VALUES = 256
value_count_ink = np.zeros(NUM_VALUES, dtype=int)
value_count_all = np.zeros(NUM_VALUES, dtype=int)
get_value_ink_ratio(value_count_ink, value_count_all, ir, label)
plot_ink_ratio(value_count_ink, value_count_all, ir)

NUM_VALUES = 65536
Z_DIM = len(images)
value_count_ink = np.zeros((Z_DIM, NUM_VALUES), dtype=int)
value_count_all = np.zeros((Z_DIM, NUM_VALUES), dtype=int)
for z in tqdm(range(Z_DIM)):
    img = images[z]
    get_value_ink_ratio(value_count_ink[z], value_count_all[z], img, label)
    plot_ink_ratio(value_count_ink[z], value_count_all[z], img)


# Function to generate run-length encoding (RLE) for the binary mask
def rle(img):
    pixels = img.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    f = StringIO()
    np.savetxt(f, runs.reshape(1, -1), delimiter=" ", fmt="%d")
    predicted = f.getvalue().strip()
    return predicted


# `binary_output` was never defined in the original notebook; as a placeholder we
# threshold the infrared image at its mean value. Substitute real ink predictions here.
binary_output = (ir > ir.mean()).astype(np.uint8)

# Generate RLE for the binary output
rle_output = rle(binary_output)
# Save the RLE to a CSV file for submission
with open("submission.csv", "w") as f:
    f.write("Id,Predicted\n")
    f.write("a," + rle_output + "\n")
    f.write("b," + rle_output + "\n")
print("Submission file 'submission.csv' has been generated.")
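# Sanity check for the RLE encoder above: decode the string back into a flat
# mask and compare with the input. This decoder is illustrative (not part of the
# original notebook) and assumes the "start length start length ..." format
# produced by rle(), with at least one positive pixel in the mask.
def rle_decode(rle_str, shape):
    mask = np.zeros(np.prod(shape), dtype=np.uint8)
    vals = list(map(int, rle_str.split()))
    for start, length in zip(vals[0::2], vals[1::2]):
        mask[start - 1 : start - 1 + length] = 1  # RLE start positions are 1-based
    return mask.reshape(shape)


assert (rle_decode(rle_output, binary_output.shape) == (binary_output > 0)).all()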
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/870/129870360.ipynb
null
null
[{"Id": 129870360, "ScriptId": 38602646, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2586962, "CreationDate": "05/17/2023 05:04:37", "VersionNumber": 2.0, "Title": "Visualize ink to pixel value correlation", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 114.0, "LinesInsertedFromPrevious": 64.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 50.0, "LinesInsertedFromFork": 62.0, "LinesDeletedFromFork": 257.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 52.0, "TotalVotes": 0}]
null
null
null
null
false
0
1,341
0
1,341
1,341
129870775
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from sklearn.model_selection import train_test_split
from scipy.stats import spearmanr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
from tqdm import tqdm
import requests

warnings.filterwarnings(
    "ignore"
)  # This is not advised in general, but it is used in this notebook to clean the presentation of results

"""
This is a basic example of what you need to do to participate in the tournament.
The code will not have access to the internet (or any socket related operation).
"""

# Imports
import xgboost as xgb
import pandas as pd
import typing
import joblib
from pathlib import Path


def train(
    X_train: pd.DataFrame,
    y_train: pd.DataFrame,
    model_directory_path: str = "resources",
) -> None:
    """
    Do your model training here.
    At each retrain this function will have to save an updated version of
    the model under the model_directory_path, as in the example below.
    Note: You can use other serialization methods than joblib.dump(), as
    long as it matches what reads the model in infer().

    Args:
        X_train, y_train: the data to train the model.
        model_directory_path: the path to save your updated model

    Returns:
        None
    """
    # basic xgboost regressor
    model = xgb.XGBRegressor(
        objective="reg:squarederror",
        max_depth=3,
        learning_rate=0.1,
        n_estimators=50,
        max_leaves=2**3,
        n_jobs=-1,
        colsample_bytree=0.1,
    )
    # training the model
    print("training...")
    model.fit(X_train.iloc[:, 2:], y_train.iloc[:, 2:])
    # make sure that the train function correctly saves the trained model
    # in the model_directory_path
    model_pathname = Path(model_directory_path) / "model.joblib"
    print(f"Saving model in {model_pathname}")
    joblib.dump(model, model_pathname)


def infer(
    X_test: pd.DataFrame, model_directory_path: str = "resources"
) -> pd.DataFrame:
    """
    Do your inference here.
    This function will load the model saved at the previous iteration and use
    it to produce your inference on the current date.
    It is mandatory to send your inferences with the ids so the system
    can match them correctly.

    Args:
        model_directory_path: the path to the directory in which we will be
            saving your updated model.
        X_test: the independent variables of the current date passed to your model.

    Returns:
        A dataframe (date, id, value) with the inferences of your model for the current date.
    """
    # loading the model saved by the train function at previous iteration
    model = joblib.load(Path(model_directory_path) / "model.joblib")
    # creating the predicted label dataframe with correct dates and ids
    y_test_predicted = X_test[["date", "id"]].copy()
    y_test_predicted["value"] = model.predict(X_test.iloc[:, 2:])
    return y_test_predicted


X_train = pd.read_parquet(
    "/kaggle/input/adia-lab-crunchdao-competition/X_train.parquet"
)
y_train = pd.read_parquet(
    "/kaggle/input/adia-lab-crunchdao-competition/y_train.parquet"
)
X_test = pd.read_parquet("/kaggle/input/adia-lab-crunchdao-competition/X_test.parquet")

print(
    "Splitting (X_train, y_train) in X_train_local, X_test_local, y_train_local, y_test_local"
)
X_train_local, X_test_local, y_train_local, y_test_local = train_test_split(
    X_train, y_train, test_size=0.2, shuffle=False
)

# Training. It may require a few minutes.
train(X_train_local, y_train_local, "/kaggle/working/")

print("Inference")
y_test_local_pred = infer(X_test_local, model_directory_path="/kaggle/working/")
score = spearmanr(y_test_local["y"], y_test_local_pred["value"])[0] * 100
print(f"Spearman's correlation {score}")
X_train.head(10)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/870/129870775.ipynb
null
null
[{"Id": 129870775, "ScriptId": 38622519, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2003977, "CreationDate": "05/17/2023 05:09:32", "VersionNumber": 1.0, "Title": "ADIA Lab Competition - EDA", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 156.0, "LinesInsertedFromPrevious": 156.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,255
0
1,255
1,255
129160363
<jupyter_start><jupyter_text>Mobile Games A/B Testing - Cookie Cats
### Context
This dataset includes A/B test results of Cookie Cats to examine what happens when the first gate in the game was moved from level 30 to level 40. When a player installed the game, he or she was randomly assigned to either gate_30 or gate_40.

### Content
The data we have is from 90,189 players that installed the game while the AB-test was running. The variables are:
**userid:** A unique number that identifies each player.
**version:** Whether the player was put in the control group (gate_30 - a gate at level 30) or the group with the moved gate (gate_40 - a gate at level 40).
**sum_gamerounds:** the number of game rounds played by the player during the first 14 days after install.
**retention_1:** Did the player come back and play **1 day** after installing?
**retention_7:** Did the player come back and play **7 days** after installing?
Kaggle dataset identifier: mobile-games-ab-testing-cookie-cats <jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

data = pd.read_csv("/kaggle/input/mobile-games-ab-testing-cookie-cats/cookie_cats.csv")
data.head(10)
data.info()
data.shape
data.describe()
data["userid"].nunique()
data.groupby("version")[["userid"]].nunique()

import matplotlib.pyplot as plt

plot_data = data.groupby("sum_gamerounds")["userid"].count()
ax = plot_data.head(100).plot()
plt.title("The distribution of players", fontweight="bold", size=14)
plt.xlabel("total gamerounds", size=12)
plt.ylabel("number of players", size=12)
plt.show()

data_retention = data[["retention_1", "retention_7"]].mean() * 100
print(
    f"1-day ratio: {round(data_retention['retention_1'], 2)}%  7-day ratio: {round(data_retention['retention_7'], 2)}%"
)

# Creating a list of bootstrapped means for each A/B group
boot_1d = []
boot_7d = []
for i in range(1000):
    boot_mean_1 = (
        data.sample(frac=1, replace=True).groupby("version")["retention_1"].mean()
    )
    boot_mean_7 = (
        data.sample(frac=1, replace=True).groupby("version")["retention_7"].mean()
    )
    boot_1d.append(boot_mean_1)
    boot_7d.append(boot_mean_7)

# Transforming the lists into DataFrames
boot_1d = pd.DataFrame(boot_1d)
boot_7d = pd.DataFrame(boot_7d)

# Kernel Density Estimate plot of the bootstrap distributions
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(13, 5))
boot_1d.plot.kde(ax=ax1)
ax1.set_xlabel("retention rate", size=12)
ax1.set_ylabel("number of samples", size=12)
ax1.set_title("1-day retention rate distribution", fontweight="bold", size=14)
boot_7d.plot.kde(ax=ax2)
ax2.set_xlabel("retention rate", size=12)
ax2.set_title("7-day retention rate distribution", fontweight="bold", size=14)
plt.show()

# Adding a column with the % difference between the two A/B groups
boot_1d["diff"] = (boot_1d["gate_30"] - boot_1d["gate_40"]) / boot_1d["gate_40"] * 100
boot_7d["diff"] = (boot_7d["gate_30"] - boot_7d["gate_40"]) / boot_7d["gate_40"] * 100

# Plotting the bootstrap % difference
fig, (ax1) = plt.subplots(1, 1, figsize=(6, 5))
boot_1d["diff"].plot.kde(ax=ax1, c="#ff99ff", label="1 day retention")
boot_7d["diff"].plot.kde(ax=ax1, c="#00bfff", label="7 days retention")
ax1.set_xlabel("% difference", size=12)
ax1.set_ylabel("% density", size=12)
ax1.set_title(
    "Difference in retention \n between the two A/B groups", fontweight="bold", size=14
)
plt.legend()
plt.show()

# Calculating the probability that 1-day retention is greater when the gate is at level 30
prob_1 = (boot_1d["diff"] > 0).sum() / len(boot_1d["diff"])
# Calculating the probability that 7-day retention is greater when the gate is at level 30
prob_7 = (boot_7d["diff"] > 0).sum() / len(boot_7d["diff"])
# Pretty printing the probabilities
print(
    f"The probability that 1-day retention is greater when the gate is at level 30: {prob_1 * 100:.1f}% \
\nThe probability that 7-day retention is greater when the gate is at level 30: {prob_7 * 100:.1f}% "
)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/160/129160363.ipynb
mobile-games-ab-testing-cookie-cats
mursideyarkin
[{"Id": 129160363, "ScriptId": 38397555, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15054663, "CreationDate": "05/11/2023 12:46:55", "VersionNumber": 2.0, "Title": "gb.ru/lessons/318486/", "EvaluationDate": "05/11/2023", "IsChange": false, "TotalLines": 97.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 97.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184962116, "KernelVersionId": 129160363, "SourceDatasetVersionId": 1927698}]
[{"Id": 1927698, "DatasetId": 1149830, "DatasourceVersionId": 1966271, "CreatorUserId": 6409983, "LicenseName": "Other (specified in description)", "CreationDate": "02/10/2021 08:16:25", "VersionNumber": 1.0, "Title": "Mobile Games A/B Testing - Cookie Cats", "Slug": "mobile-games-ab-testing-cookie-cats", "Subtitle": "Mobile Games A/B Testing - Cookie Cats", "Description": "### Context\n\nThis dataset includes A/B test results of Cookie Cats to examine what happens when the first gate in the game was moved from level 30 to level 40. When a player installed the game, he or she was randomly assigned to either gate_30 or gate_40. \n\n### Content\n\nThe data we have is from 90,189 players that installed the game while the AB-test was running. The variables are:\n\n**userid:** A unique number that identifies each player.\n**version:** Whether the player was put in the control group (gate_30 - a gate at level 30) or the group with the moved gate (gate_40 - a gate at level 40).\n**sum_gamerounds:** the number of game rounds played by the player during the first 14 days after install.\n**retention_1:** Did the player come back and play <strong>1 day</strong> after installing?\n**retention_7:** Did the player come back and play <strong>7 days</strong> after installing?\n\nWhen a player installed the game, he or she was randomly assigned to either. \n\n### Acknowledgements\n\nThis dataset is taken from [DataCamp](https://www.datacamp.com/projects/184) \nCookie Cat is a hugely popular mobile puzzle game developed by [Tactile Entertainment](https://tactilegames.com/cookie-cats/)\n\nThanks to them for this dataset! \ud83d\ude3b", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1149830, "CreatorUserId": 6409983, "OwnerUserId": 6409983.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1927698.0, "CurrentDatasourceVersionId": 1966271.0, "ForumId": 1167368, "Type": 2, "CreationDate": "02/10/2021 08:16:25", "LastActivityDate": "02/10/2021", "TotalViews": 10291, "TotalDownloads": 742, "TotalVotes": 17, "TotalKernels": 21}]
[{"Id": 6409983, "UserName": "mursideyarkin", "DisplayName": "M\u00fcr\u015fide Yark\u0131n", "RegisterDate": "12/20/2020", "PerformanceTier": 1}]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("/kaggle/input/mobile-games-ab-testing-cookie-cats/cookie_cats.csv")
data.head(10)
data.info()
data.shape
data.describe()
data["userid"].nunique()
data.groupby("version")[["userid"]].nunique()
import matplotlib.pyplot as plt

plot_data = data.groupby("sum_gamerounds")["userid"].count()
ax = plot_data.head(100).plot()
plt.title("The distribution of players", fontweight="bold", size=14)
plt.xlabel("total gamerounds", size=12)
plt.ylabel("number of player", size=12)
plt.show()
data_retention = data[["retention_1", "retention_7"]].mean() * 100
print(
    f"1-day ratio: {round(data_retention['retention_1'],2)}%    7-days ratio: {round(data_retention['retention_7'],2)}%"
)
# Creating a list with bootstrapped means for each A/B group
boot_1d = []
boot_7d = []
for i in range(1000):
    boot_mean_1 = (
        data.sample(frac=1, replace=True).groupby("version")["retention_1"].mean()
    )
    boot_mean_7 = (
        data.sample(frac=1, replace=True).groupby("version")["retention_7"].mean()
    )
    boot_1d.append(boot_mean_1)
    boot_7d.append(boot_mean_7)
# Transforming the list to a DataFrame
boot_1d = pd.DataFrame(boot_1d)
boot_7d = pd.DataFrame(boot_7d)
# Kernel Density Estimate plot of the bootstrap distributions
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(13, 5))
boot_1d.plot.kde(ax=ax1)
ax1.set_xlabel("retention rate", size=12)
ax1.set_ylabel("density", size=12)
ax1.set_title("1 day retention rate distribution", fontweight="bold", size=14)
boot_7d.plot.kde(ax=ax2)
ax2.set_xlabel("retention rate", size=12)
ax2.set_title("7 days retention rate distribution", fontweight="bold", size=14)
plt.show()
# Adding a column with the % difference between the two A/B groups
boot_1d["diff"] = (boot_1d["gate_30"] - boot_1d["gate_40"]) / boot_1d["gate_40"] * 100
boot_7d["diff"] = (boot_7d["gate_30"] - boot_7d["gate_40"]) / boot_7d["gate_40"] * 100
# Plotting the bootstrap % difference
fig, (ax1) = plt.subplots(1, 1, figsize=(6, 5))
boot_1d["diff"].plot.kde(ax=ax1, c="#ff99ff", label="1 day retention")
boot_7d["diff"].plot.kde(ax=ax1, c="#00bfff", label="7 days retention")
ax1.set_xlabel("% difference", size=12)
ax1.set_ylabel("% density", size=12)
ax1.set_title(
    "Difference in retention \n between the two A/B groups", fontweight="bold", size=14
)
plt.legend()
plt.show()
# Calculating the probability that 1-day retention is greater when the gate is at level 30
prob_1 = (boot_1d["diff"] > 0).sum() / len(boot_1d["diff"])
# Calculating the probability that 7-days retention is greater when the gate is at level 30
prob_7 = (boot_7d["diff"] > 0).sum() / len(boot_7d["diff"])
# Pretty printing the probability
print(
    f"The probability that 1-day retention is greater when the gate is at level 30: {round(prob_1,2)*100}% \
\nThe probability that 7-days retention is greater when the gate is at level 30: {round(prob_7,2)*100}% "
)
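The bootstrap comparison above can be cross-checked with a classical two-proportion z-test. This is a minimal added sketch, not part of the original notebook; it assumes `data` is the cookie_cats DataFrame loaded above and that statsmodels is available.

from statsmodels.stats.proportion import proportions_ztest

# successes (players retained at day 7) and trials for gate_30 / gate_40
counts = data.groupby("version")["retention_7"].agg(["sum", "size"])
stat, pval = proportions_ztest(counts["sum"].values, counts["size"].values)
print(f"two-proportion z-test on retention_7: z = {stat:.3f}, p = {pval:.4f}")

A small p-value here would point the same way as the bootstrap probability computed above, without the resampling loop.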
false
1
1,246
0
1,538
1,246
129160335
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Spaceship Titanic
# ![joel-filipe-QwoNAhbmLLo-unsplash.jpg](attachment:d8768241-abb3-4c99-a181-c091ec453d26.jpg)
# Table of Contents
# * [Introduction](#section-zero)
# * [Importing Python Libraries](#section-one)
# * [Working with Data](#section-two)
# - [Reading the Data](#subsection-one)
# - [Missing/Null Values](#subsection-two)
# - [Data Visualizations](#subsection-three)
# * [Feature Selection](#section-three)
# - [Fitting the Model](#subsection-four)
# - [Model Selection](#subsection-five)
# - [Decision Tree](#subsection-six)
# * [Submission](#section-four)
# Introduction
# What is Spaceship Titanic?
# > The ***Spaceship Titanic*** was an interstellar passenger liner launched a month ago. With almost 13,000 passengers on board, the vessel set out on its maiden voyage transporting emigrants from our solar system to three newly habitable exoplanets orbiting nearby stars.
# What is our Job?
# > In this competition our task is to predict ***whether a passenger was transported*** to an alternate dimension during the Spaceship Titanic's collision with the spacetime anomaly.
# What are the Data we are provided with?
# > ***PassengerId*** - A unique Id for each passenger. Each Id takes the form gggg_pp where gggg indicates a group the passenger is travelling with and pp is their number within the group. People in a group are often family members, but not always.
# >
# > ***HomePlanet*** - The planet the passenger departed from, typically their planet of permanent residence.
# >
# > ***CryoSleep*** - Indicates whether the passenger elected to be put into suspended animation for the duration of the voyage. Passengers in cryosleep are confined to their cabins.
# >
# > ***Cabin*** - The cabin number where the passenger is staying. Takes the form deck/num/side, where side can be either P for Port or S for Starboard.
# >
# > ***Destination*** - The planet the passenger will be debarking to.
# >
# > ***Age*** - The age of the passenger.
# >
# > ***VIP*** - Whether the passenger has paid for special VIP service during the voyage.
# >
# > ***RoomService, FoodCourt, ShoppingMall, Spa, VRDeck*** - Amount the passenger has billed at each of the Spaceship Titanic's many luxury amenities.
# >
# > ***Name*** - The first and last names of the passenger.
# >
# > ***Transported*** - Whether the passenger was transported to another dimension. This is the target, the column you are trying to predict.
# Importing Python Libraries
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# # Working with the Data
# What changes/modifications are required for Data Cleaning?
# > ***Reading and Analyzing the .csv files:*** Firstly, we will be going through the train and test datasets and checking for the important columns, the missing/null values and columns that are irrelevant to us.
# >
# > ***Deleting Columns:*** As mentioned above, we need to drop the columns that are of no meaning.
# >
# > ***Handling Null Values:*** We can either drop the null values using ***dropna*** or fill the missing values using ***mean*** (for numerical data) or ***mode*** (for numerical/categorical data)
# Reading and Analyzing the Files
#
df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
data = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
df.head()
data.head()
df.shape
data.shape
sns.heatmap(df.isnull())
# # Handling Missing Data
#
df.drop("PassengerId", axis=1)
df.drop("Name", axis=1, inplace=True)
data.drop("PassengerId", axis=1)
data.drop("Name", axis=1, inplace=True)
df["RoomService"] = df["RoomService"].fillna(df["RoomService"].mean())
df["FoodCourt"] = df["FoodCourt"].fillna(df["FoodCourt"].mean())
df["VRDeck"] = df["VRDeck"].fillna(df["VRDeck"].mean())
df["CryoSleep"] = df["CryoSleep"].fillna(df["CryoSleep"].mode()[0])
df["HomePlanet"] = df["HomePlanet"].fillna(df["HomePlanet"].mode()[0])
df["Cabin"] = df["Cabin"].fillna(df["Cabin"].mode()[0])
df["Destination"] = df["Destination"].fillna(df["Destination"].mode()[0])
df["Age"] = df["Age"].fillna(df["Age"].mean())
df["VIP"] = df["VIP"].fillna(df["VIP"].mode()[0])
df["ShoppingMall"] = df["ShoppingMall"].fillna(df["ShoppingMall"].mean())
df["Spa"] = df["Spa"].fillna(df["Spa"].mean())
data["RoomService"] = data["RoomService"].fillna(data["RoomService"].mean())
data["FoodCourt"] = data["FoodCourt"].fillna(data["FoodCourt"].mean())
data["VRDeck"] = data["VRDeck"].fillna(data["VRDeck"].mean())
data["CryoSleep"] = data["CryoSleep"].fillna(data["CryoSleep"].mode()[0])
data["HomePlanet"] = data["HomePlanet"].fillna(data["HomePlanet"].mode()[0])
data["Cabin"] = data["Cabin"].fillna(data["Cabin"].mode()[0])
data["Destination"] = data["Destination"].fillna(data["Destination"].mode()[0])
data["Age"] = data["Age"].fillna(data["Age"].mean())
data["VIP"] = data["VIP"].fillna(data["VIP"].mode()[0])
data["ShoppingMall"] = data["ShoppingMall"].fillna(data["ShoppingMall"].mean())
data["Spa"] = data["Spa"].fillna(data["Spa"].mean())
df.isnull().sum()
data.isnull().sum()
new_df = pd.concat([df, data])
new_df.head()
new_df.shape
# # Data Visualizations
#
plt.figure(figsize=(15, 10))
sns.heatmap(new_df.corr(), annot=True)
sns.histplot(data=new_df, x="Age", bins=20, color="pink")
sns.pairplot(data=new_df, hue="Transported")
columns = ["CryoSleep", "Destination", "VIP", "HomePlanet"]
for col in columns:
    fig, ax = plt.subplots(figsize=(5, 3))
    sns.countplot(data=new_df, x=col, hue="Transported", ax=ax, color="pink")
# # Feature Selection
#
df_test = new_df[new_df["Transported"].isnull()]
df_train = new_df[~new_df["Transported"].isnull()]
df.drop("Cabin", axis=1, inplace=True)
data.drop("Cabin", axis=1, inplace=True)
# # Fitting the Model
# Explaining elements in Model Selection and Model Fitting
# > ***X_train:*** This includes all your independent variables; these will be used to train the model. Also, as we have specified test_size = 0.33, this means 67% of observations from your complete data will be used to train/fit the model and the remaining 33% will be used to test the model. Here, all the features other than 'Transported' are independent features.
# >
# > ***X_test:*** This is the remaining 33% portion of the independent variables from the data which will not be used in the training phase and will be used to make predictions to test the accuracy of the model.
# >
# > ***y_train:*** This is your dependent variable which needs to be predicted by this model; it includes category labels for your independent variables, and we need to specify our dependent variable while training/fitting the model. Here the feature is 'Transported' as mentioned above.
# >
# > ***y_test:*** This data has category labels for your test data; these labels will be used to test the accuracy between actual and predicted categories.
X = df_train.drop("Transported", axis=1)
y = df_train["Transported"]
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=42, test_size=0.33, shuffle=True
)
# # Selecting Model - Decision Tree Classifier
# What is a Decision Tree Classifier?
# > ***Decision Tree*** is a Supervised Machine Learning Algorithm that uses a set of rules to make decisions, similarly to how humans make decisions.
# >
# > The intuition behind ***Decision Trees*** is that you use the dataset features to create yes/no questions and continually ***split the dataset*** until you isolate all data points belonging to each class.
# >
# > The first node is called the ***root node***. The result of asking a question splits the dataset based on the value of a feature, and creates new nodes. If we decide to stop the process after a split, the ***last nodes created are called leaf nodes***.
# ![decision-tree-classification-algorithm.png](attachment:bad5fa8f-bfeb-4018-9845-6d5222101fd9.png)
# > The goal is to continue ***splitting the feature space*** and applying rules until we don’t have any more rules to apply or no data points left. Then, it’s time to assign a class to all data points in each leaf node.
# For more information, you may refer to this [article](https://towardsdatascience.com/decision-tree-classifier-explained-in-real-life-picking-a-vacation-destination-6226b2b60575)
from sklearn.tree import DecisionTreeClassifier

y = df["Transported"]
features = ["Destination", "CryoSleep", "HomePlanet", "VIP"]
X = pd.get_dummies(df[features])
X_test = pd.get_dummies(data[features])
model = DecisionTreeClassifier(max_depth=7)
model.fit(X, y)
data.columns
from sklearn import tree

plt.figure(figsize=(50, 5))
tree.plot_tree(model, filled=True)
y_pred = model.predict(X_test)
# # Submitting the Predictions
#
output = pd.DataFrame({"PassengerId": data.PassengerId, "Transported": y_pred})
output.to_csv("submission.csv", index=False)
print("Submission successful!")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/160/129160335.ipynb
null
null
[{"Id": 129160335, "ScriptId": 38350383, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13030307, "CreationDate": "05/11/2023 12:46:39", "VersionNumber": 5.0, "Title": "Spaceship Titanic: EDA+Decision Tree", "EvaluationDate": "05/11/2023", "IsChange": false, "TotalLines": 274.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 274.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Spaceship Titanic
# ![joel-filipe-QwoNAhbmLLo-unsplash.jpg](attachment:d8768241-abb3-4c99-a181-c091ec453d26.jpg)
# Table of Contents
# * [Introduction](#section-zero)
# * [Importing Python Libraries](#section-one)
# * [Working with Data](#section-two)
# - [Reading the Data](#subsection-one)
# - [Missing/Null Values](#subsection-two)
# - [Data Visualizations](#subsection-three)
# * [Feature Selection](#section-three)
# - [Fitting the Model](#subsection-four)
# - [Model Selection](#subsection-five)
# - [Decision Tree](#subsection-six)
# * [Submission](#section-four)
# Introduction
# What is Spaceship Titanic?
# > The ***Spaceship Titanic*** was an interstellar passenger liner launched a month ago. With almost 13,000 passengers on board, the vessel set out on its maiden voyage transporting emigrants from our solar system to three newly habitable exoplanets orbiting nearby stars.
# What is our Job?
# > In this competition our task is to predict ***whether a passenger was transported*** to an alternate dimension during the Spaceship Titanic's collision with the spacetime anomaly.
# What are the Data we are provided with?
# > ***PassengerId*** - A unique Id for each passenger. Each Id takes the form gggg_pp where gggg indicates a group the passenger is travelling with and pp is their number within the group. People in a group are often family members, but not always.
# >
# > ***HomePlanet*** - The planet the passenger departed from, typically their planet of permanent residence.
# >
# > ***CryoSleep*** - Indicates whether the passenger elected to be put into suspended animation for the duration of the voyage. Passengers in cryosleep are confined to their cabins.
# >
# > ***Cabin*** - The cabin number where the passenger is staying. Takes the form deck/num/side, where side can be either P for Port or S for Starboard.
# >
# > ***Destination*** - The planet the passenger will be debarking to.
# >
# > ***Age*** - The age of the passenger.
# >
# > ***VIP*** - Whether the passenger has paid for special VIP service during the voyage.
# >
# > ***RoomService, FoodCourt, ShoppingMall, Spa, VRDeck*** - Amount the passenger has billed at each of the Spaceship Titanic's many luxury amenities.
# >
# > ***Name*** - The first and last names of the passenger.
# >
# > ***Transported*** - Whether the passenger was transported to another dimension. This is the target, the column you are trying to predict.
# Importing Python Libraries
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# # Working with the Data
# What changes/modifications are required for Data Cleaning?
# > ***Reading and Analyzing the .csv files:*** Firstly, we will be going through the train and test datasets and checking for the important columns, the missing/null values and columns that are irrelevant to us.
# >
# > ***Deleting Columns:*** As mentioned above, we need to drop the columns that are of no meaning.
# >
# > ***Handling Null Values:*** We can either drop the null values using ***dropna*** or fill the missing values using ***mean*** (for numerical data) or ***mode*** (for numerical/categorical data)
# Reading and Analyzing the Files
#
df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
data = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
df.head()
data.head()
df.shape
data.shape
sns.heatmap(df.isnull())
# # Handling Missing Data
#
df.drop("PassengerId", axis=1)
df.drop("Name", axis=1, inplace=True)
data.drop("PassengerId", axis=1)
data.drop("Name", axis=1, inplace=True)
df["RoomService"] = df["RoomService"].fillna(df["RoomService"].mean())
df["FoodCourt"] = df["FoodCourt"].fillna(df["FoodCourt"].mean())
df["VRDeck"] = df["VRDeck"].fillna(df["VRDeck"].mean())
df["CryoSleep"] = df["CryoSleep"].fillna(df["CryoSleep"].mode()[0])
df["HomePlanet"] = df["HomePlanet"].fillna(df["HomePlanet"].mode()[0])
df["Cabin"] = df["Cabin"].fillna(df["Cabin"].mode()[0])
df["Destination"] = df["Destination"].fillna(df["Destination"].mode()[0])
df["Age"] = df["Age"].fillna(df["Age"].mean())
df["VIP"] = df["VIP"].fillna(df["VIP"].mode()[0])
df["ShoppingMall"] = df["ShoppingMall"].fillna(df["ShoppingMall"].mean())
df["Spa"] = df["Spa"].fillna(df["Spa"].mean())
data["RoomService"] = data["RoomService"].fillna(data["RoomService"].mean())
data["FoodCourt"] = data["FoodCourt"].fillna(data["FoodCourt"].mean())
data["VRDeck"] = data["VRDeck"].fillna(data["VRDeck"].mean())
data["CryoSleep"] = data["CryoSleep"].fillna(data["CryoSleep"].mode()[0])
data["HomePlanet"] = data["HomePlanet"].fillna(data["HomePlanet"].mode()[0])
data["Cabin"] = data["Cabin"].fillna(data["Cabin"].mode()[0])
data["Destination"] = data["Destination"].fillna(data["Destination"].mode()[0])
data["Age"] = data["Age"].fillna(data["Age"].mean())
data["VIP"] = data["VIP"].fillna(data["VIP"].mode()[0])
data["ShoppingMall"] = data["ShoppingMall"].fillna(data["ShoppingMall"].mean())
data["Spa"] = data["Spa"].fillna(data["Spa"].mean())
df.isnull().sum()
data.isnull().sum()
new_df = pd.concat([df, data])
new_df.head()
new_df.shape
# # Data Visualizations
#
plt.figure(figsize=(15, 10))
sns.heatmap(new_df.corr(), annot=True)
sns.histplot(data=new_df, x="Age", bins=20, color="pink")
sns.pairplot(data=new_df, hue="Transported")
columns = ["CryoSleep", "Destination", "VIP", "HomePlanet"]
for col in columns:
    fig, ax = plt.subplots(figsize=(5, 3))
    sns.countplot(data=new_df, x=col, hue="Transported", ax=ax, color="pink")
# # Feature Selection
#
df_test = new_df[new_df["Transported"].isnull()]
df_train = new_df[~new_df["Transported"].isnull()]
df.drop("Cabin", axis=1, inplace=True)
data.drop("Cabin", axis=1, inplace=True)
# # Fitting the Model
# Explaining elements in Model Selection and Model Fitting
# > ***X_train:*** This includes all your independent variables; these will be used to train the model. Also, as we have specified test_size = 0.33, this means 67% of observations from your complete data will be used to train/fit the model and the remaining 33% will be used to test the model. Here, all the features other than 'Transported' are independent features.
# >
# > ***X_test:*** This is the remaining 33% portion of the independent variables from the data which will not be used in the training phase and will be used to make predictions to test the accuracy of the model.
# >
# > ***y_train:*** This is your dependent variable which needs to be predicted by this model; it includes category labels for your independent variables, and we need to specify our dependent variable while training/fitting the model. Here the feature is 'Transported' as mentioned above.
# >
# > ***y_test:*** This data has category labels for your test data; these labels will be used to test the accuracy between actual and predicted categories.
X = df_train.drop("Transported", axis=1)
y = df_train["Transported"]
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=42, test_size=0.33, shuffle=True
)
# # Selecting Model - Decision Tree Classifier
# What is a Decision Tree Classifier?
# > ***Decision Tree*** is a Supervised Machine Learning Algorithm that uses a set of rules to make decisions, similarly to how humans make decisions.
# >
# > The intuition behind ***Decision Trees*** is that you use the dataset features to create yes/no questions and continually ***split the dataset*** until you isolate all data points belonging to each class.
# >
# > The first node is called the ***root node***. The result of asking a question splits the dataset based on the value of a feature, and creates new nodes. If we decide to stop the process after a split, the ***last nodes created are called leaf nodes***.
# ![decision-tree-classification-algorithm.png](attachment:bad5fa8f-bfeb-4018-9845-6d5222101fd9.png)
# > The goal is to continue ***splitting the feature space*** and applying rules until we don’t have any more rules to apply or no data points left. Then, it’s time to assign a class to all data points in each leaf node.
# For more information, you may refer to this [article](https://towardsdatascience.com/decision-tree-classifier-explained-in-real-life-picking-a-vacation-destination-6226b2b60575)
from sklearn.tree import DecisionTreeClassifier

y = df["Transported"]
features = ["Destination", "CryoSleep", "HomePlanet", "VIP"]
X = pd.get_dummies(df[features])
X_test = pd.get_dummies(data[features])
model = DecisionTreeClassifier(max_depth=7)
model.fit(X, y)
data.columns
from sklearn import tree

plt.figure(figsize=(50, 5))
tree.plot_tree(model, filled=True)
y_pred = model.predict(X_test)
# # Submitting the Predictions
#
output = pd.DataFrame({"PassengerId": data.PassengerId, "Transported": y_pred})
output.to_csv("submission.csv", index=False)
print("Submission successful!")
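The notebook above fits the tree on all of df and never scores it, since the competition test set has no labels. Below is a minimal added sketch of a held-out accuracy estimate, assuming the X (dummy-encoded features) and y built in the final modelling cell; cross-validation and the fold count are assumptions, not the author's choices.

from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

# 5-fold cross-validated accuracy on the training features built above
scores = cross_val_score(DecisionTreeClassifier(max_depth=7), X, y.astype(bool), cv=5)
print(f"CV accuracy: {scores.mean():.3f} +/- {scores.std():.3f}")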
false
0
2,898
0
2,898
2,898
129160053
<jupyter_start><jupyter_text>mobile_attrition
Kaggle dataset identifier: mobile-attrition
<jupyter_script>from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.preprocessing import (
    StandardScaler,
    OneHotEncoder,
    LabelEncoder,
    MinMaxScaler,
)
from sklearn.svm import SVC, NuSVC
import numpy as np
import scipy as sp
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
from matplotlib import pyplot as plt

attrition = pd.read_csv(
    "/kaggle/input/mobile-attrition/Connect_Mobile__Attrition_Data_file.csv"
)
attrition.head()
attrition.dtypes
# Get a count of the empty values for each column
attrition.isna().sum()
# Check for any missing/null values in the data
attrition.isnull().values.any()
# For checking the null and non-null values and datatypes in the dataset
# attrition.info()
attrition.describe()
attrition["active_cust"].value_counts()
sns.countplot(attrition["active_cust"])
for column in attrition.columns:
    if attrition[column].dtype == "object":
        print(str(column) + ":" + str(attrition[column].unique()))
        print(attrition[column].value_counts())
        print("__________________________________")
attrition.corr()
plt.figure(figsize=(14, 14))
sns.heatmap(attrition.corr(), annot=True, fmt=".0%")
# Separating feature and target matrices, then scaling the features.
# (Previously the scaled matrix was immediately overwritten by a raw
# iloc selection, so the scaler's output was never used; the scaling
# is now applied to the X that is actually fed to the model.)
X = attrition.drop(["active_cust"], axis=1).values
Y = attrition["active_cust"].values
scale = StandardScaler()
X = scale.fit_transform(X)
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42
)
print(
    f"Rows in X Test set: {len(X_test)}\nRows in Y Test set: {len(Y_test)}\nRows in X Train set: {len(X_train)}\nRows in Y Train set: {len(Y_train)}"
)
forest = RandomForestClassifier(n_estimators=10, criterion="entropy", random_state=42)
forest.fit(X_train, Y_train)
forest.score(X_train, Y_train)
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(Y_test, forest.predict(X_test))
TN = cm[0][0]
TP = cm[1][1]
FN = cm[1][0]
FP = cm[0][1]
print(cm)
print("Model Testing Accuracy={}".format((TP + TN) / (TP + TN + FN + FP)))
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/160/129160053.ipynb
mobile-attrition
dheerajvamsi
[{"Id": 129160053, "ScriptId": 38398025, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11434062, "CreationDate": "05/11/2023 12:44:14", "VersionNumber": 1.0, "Title": "Connect-Mobile-Customer-Attrition-Prediction", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 99.0, "LinesInsertedFromPrevious": 99.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184961508, "KernelVersionId": 129160053, "SourceDatasetVersionId": 2169056}]
[{"Id": 2169056, "DatasetId": 1302108, "DatasourceVersionId": 2210300, "CreatorUserId": 5183739, "LicenseName": "Unknown", "CreationDate": "04/28/2021 09:42:26", "VersionNumber": 1.0, "Title": "mobile_attrition", "Slug": "mobile-attrition", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1302108, "CreatorUserId": 5183739, "OwnerUserId": 5183739.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2169056.0, "CurrentDatasourceVersionId": 2210300.0, "ForumId": 1320859, "Type": 2, "CreationDate": "04/28/2021 09:42:26", "LastActivityDate": "04/28/2021", "TotalViews": 981, "TotalDownloads": 5, "TotalVotes": 2, "TotalKernels": 1}]
[{"Id": 5183739, "UserName": "dheerajvamsi", "DisplayName": "dheeraj vamsi", "RegisterDate": "05/28/2020", "PerformanceTier": 0}]
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.preprocessing import (
    StandardScaler,
    OneHotEncoder,
    LabelEncoder,
    MinMaxScaler,
)
from sklearn.svm import SVC, NuSVC
import numpy as np
import scipy as sp
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
from matplotlib import pyplot as plt

attrition = pd.read_csv(
    "/kaggle/input/mobile-attrition/Connect_Mobile__Attrition_Data_file.csv"
)
attrition.head()
attrition.dtypes
# Get a count of the empty values for each column
attrition.isna().sum()
# Check for any missing/null values in the data
attrition.isnull().values.any()
# For checking the null and non-null values and datatypes in the dataset
# attrition.info()
attrition.describe()
attrition["active_cust"].value_counts()
sns.countplot(attrition["active_cust"])
for column in attrition.columns:
    if attrition[column].dtype == "object":
        print(str(column) + ":" + str(attrition[column].unique()))
        print(attrition[column].value_counts())
        print("__________________________________")
attrition.corr()
plt.figure(figsize=(14, 14))
sns.heatmap(attrition.corr(), annot=True, fmt=".0%")
# Separating feature and target matrices, then scaling the features.
# (Previously the scaled matrix was immediately overwritten by a raw
# iloc selection, so the scaler's output was never used; the scaling
# is now applied to the X that is actually fed to the model.)
X = attrition.drop(["active_cust"], axis=1).values
Y = attrition["active_cust"].values
scale = StandardScaler()
X = scale.fit_transform(X)
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42
)
print(
    f"Rows in X Test set: {len(X_test)}\nRows in Y Test set: {len(Y_test)}\nRows in X Train set: {len(X_train)}\nRows in Y Train set: {len(Y_train)}"
)
forest = RandomForestClassifier(n_estimators=10, criterion="entropy", random_state=42)
forest.fit(X_train, Y_train)
forest.score(X_train, Y_train)
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(Y_test, forest.predict(X_test))
TN = cm[0][0]
TP = cm[1][1]
FN = cm[1][0]
FP = cm[0][1]
print(cm)
print("Model Testing Accuracy={}".format((TP + TN) / (TP + TN + FN + FP)))
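The manual confusion-matrix arithmetic above can be cross-checked with sklearn's built-in metrics; a short added sketch assuming `forest`, `X_test` and `Y_test` from the cells above.

from sklearn.metrics import accuracy_score, classification_report

Y_pred = forest.predict(X_test)
print("Accuracy:", accuracy_score(Y_test, Y_pred))  # same value as the (TP+TN)/total formula
print(classification_report(Y_test, Y_pred))  # per-class precision, recall and F1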
false
1
700
0
722
700
129160268
# !pip install -q loralib
# !pip install -q sentencepiece
# !pip install transformers
# !pip install tensorflow
# !pip install torch
# !pip install datasets
# !pip install evaluate
# !pip install -q git+https://github.com/huggingface/transformers.git
# !pip install -q git+https://github.com/huggingface/peft.git
import os
import pandas as pd
import numpy as np
from PyPDF2 import PdfReader
from numpy.linalg import norm

# import re
# import collections
# import evaluate
from sentence_transformers import SentenceTransformer
import re

# import transformers
from pprint import pprint

# from peft import PeftModel
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer, GenerationConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

# reader = PdfReader("/kaggle/input/devrup/Aetna_policy.pdf")
def get_pdf_embeddings(embedding_model, pdf_nam):
    """uses embedding models to create page wise embeddings of pdfs"""
    reader = PdfReader(pdf_nam)
    number_of_pages = len(reader.pages)
    l = []
    for item in range(number_of_pages):
        page = reader.pages[item]
        text = page.extract_text()
        paragraphs = text.split("\n \n")
        i = 0
        text_para = ""
        for paras in paragraphs:
            text_para += paras
            i += 1
            if i == 2:
                text_updated = (
                    re.sub("[^a-zA-Z0-9 \n\.]", "", text_para)
                    .strip()
                    .replace("\n", " ")
                )
                text_embbedding = embedding_model.encode(text_updated)
                d = {}
                d["Para_text"] = text_updated
                d["para_embeddings"] = text_embbedding
                l.append(d)
                i = 0
                text_para = ""
    return l

# tokenizer = LlamaTokenizer.from_pretrained("chainyo/alpaca-lora-7b")
# model = LlamaForCausalLM.from_pretrained(
#     "chainyo/alpaca-lora-7b",
#     load_in_8bit=True,
#     torch_dtype=torch.float16,
#     device_map="auto",
# )
tokenizer = AutoTokenizer.from_pretrained("s-JoL/Open-Llama-V1", use_fast=False)
model = AutoModelForCausalLM.from_pretrained("s-JoL/Open-Llama-V1").cuda()

def generate_prompt(instruction: str, input_ctxt: str = None) -> str:
    if input_ctxt:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Input:
{input_ctxt}
### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Response:"""

# Default generation config; it must be defined before evaluate() is called
# in the QA loop below (it was previously commented out, which would have
# raised a NameError on the first call)
generation_config = GenerationConfig(
    temperature=0.1,
    top_p=0.75,
    num_beams=4,
)

def evaluate(instruction, input_ctxt):
    prompt = generate_prompt(instruction, input_ctxt)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].cuda()
    generation_output = model.generate(
        input_ids=input_ids,
        generation_config=generation_config,
        return_dict_in_generate=True,
        output_scores=True,
        # max_new_tokens=128
    )
    for s in generation_output.sequences:
        output = tokenizer.decode(s)
    response = output.split("### Response:")[1].strip()
    return response

# paraphrasing and reasoning test
# question="What was Devrup searching for?"
# context="Devrup was a sailor who came from the far away seas of North America and wanted to explore the world in search of gold"
# evaluate(question,context)
embedding_model = SentenceTransformer(
    "flax-sentence-embeddings/all_datasets_v4_MiniLM-L6"
)
# df_full=pd.read_csv("/kaggle/input/alpaca/embedded_pdf.csv")
df_test = pd.read_csv("/kaggle/input/alpaca-test/test_set_latest.csv")
pdf_nam = "/kaggle/input/aetna-policy/Aetna_policy.pdf"
l = get_pdf_embeddings(embedding_model, pdf_nam)
df_full = pd.DataFrame(l)

def get_context(question, embedding_model, df):
    """Given a question, the embedding model used, and an embedded dataframe
    containing pdf pages as vectors, calculate cosine similarity and return
    the context most relevant to the question"""
    text_embbedding = embedding_model.encode(question)

    def cosine_col_gen(X):
        return np.dot(X, text_embbedding) / (norm(X) * norm(text_embbedding))

    df["cosine_similarity"] = df["para_embeddings"].apply(cosine_col_gen)
    df = df.sort_values("cosine_similarity", ascending=False)
    context = ""
    df_temp = df.head(2).sort_index()
    for item in df_temp.Para_text.values.tolist():
        context += item
    return context

def get_answers(
    question, embedding_model, df_full, context=True, custom_context="None"
):
    if context:
        context = get_context(question, embedding_model, df_full)
    else:
        # use the caller-supplied context when one is given (previously
        # custom_context was ignored), otherwise fall back to no context
        context = custom_context if custom_context != "None" else None
    # prompt=context+question
    response = evaluate(question, context)
    return response, context

l = []
for question, answer in zip(df_test["question"], df_test["human_ans_spans"]):
    print("question: " + question)
    d = {}
    response, context = get_answers(question, embedding_model, df_full, context=False)
    d["Question"] = question
    d["Real_answer"] = answer
    d["Predicted_answer"] = response
    # d['Context_given']=context
    l.append(d)
    print("#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*")
    # print("context provided: "+context)
    print("#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*")
    print("response: " + response)
# 128
generation_config = GenerationConfig(
    temperature=50,
    top_p=0.2,
    # top_k=40,
    # num_beams=4,
    max_new_tokens=16,
)
question = "Write a sad story involving a cat and a mouse and a piece of fish"
context = None
evaluate(question, context)
# 1024 tokens
generation_config = GenerationConfig(
    temperature=0.5,
    top_p=0.3,
    # top_k=0,
    # num_beams=4,
    max_new_tokens=512,
)
question = "Write a poem in Paulo Coelho style"
context = None
print(evaluate(question, context))
# 1024 tokens
generation_config = GenerationConfig(
    temperature=0.9,
    top_p=0.3,
    # top_k=0,
    # num_beams=4,
    max_new_tokens=512,
)
question = "Write a poem in Paulo Coelho style"
context = None
print(evaluate(question, context))
generation_config = GenerationConfig(
    temperature=0,
    top_p=0.2,
    # top_k=40,
    # num_beams=4,
    max_new_tokens=128,
)
question = "A sad story involving a cat and a mouse and a piece of fish"
context = None
evaluate(question, context)
pd.DataFrame(l).to_csv("alpaca_results_no_context.csv", index=False)
# proper context predictions (a generator expression was previously passed
# to np.array, which does not build an element-wise array; list
# comprehensions are used instead, keeping only the response text)
df_test["prediction_text_proper_context"] = [
    get_answers(
        question, embedding_model, df_full, context=False, custom_context=context
    )[0]
    for question, context in zip(df_test["question"], df_test["context"])
]
# cosine context predictions
df_test["prediction_text_cosine_context"] = [
    get_answers(question, embedding_model, df_full)[0]
    for question in df_test["question"]
]
df_test.to_csv("/kaggle/working/test_results.csv", index=False)
df_test
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/160/129160268.ipynb
null
null
[{"Id": 129160268, "ScriptId": 38109077, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5690654, "CreationDate": "05/11/2023 12:46:06", "VersionNumber": 2.0, "Title": "alpaca lora demo", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 252.0, "LinesInsertedFromPrevious": 197.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 55.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# !pip install -q loralib
# !pip install -q sentencepiece
# !pip install transformers
# !pip install tensorflow
# !pip install torch
# !pip install datasets
# !pip install evaluate
# !pip install -q git+https://github.com/huggingface/transformers.git
# !pip install -q git+https://github.com/huggingface/peft.git
import os
import pandas as pd
import numpy as np
from PyPDF2 import PdfReader
from numpy.linalg import norm

# import re
# import collections
# import evaluate
from sentence_transformers import SentenceTransformer
import re

# import transformers
from pprint import pprint

# from peft import PeftModel
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer, GenerationConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

# reader = PdfReader("/kaggle/input/devrup/Aetna_policy.pdf")
def get_pdf_embeddings(embedding_model, pdf_nam):
    """uses embedding models to create page wise embeddings of pdfs"""
    reader = PdfReader(pdf_nam)
    number_of_pages = len(reader.pages)
    l = []
    for item in range(number_of_pages):
        page = reader.pages[item]
        text = page.extract_text()
        paragraphs = text.split("\n \n")
        i = 0
        text_para = ""
        for paras in paragraphs:
            text_para += paras
            i += 1
            if i == 2:
                text_updated = (
                    re.sub("[^a-zA-Z0-9 \n\.]", "", text_para)
                    .strip()
                    .replace("\n", " ")
                )
                text_embbedding = embedding_model.encode(text_updated)
                d = {}
                d["Para_text"] = text_updated
                d["para_embeddings"] = text_embbedding
                l.append(d)
                i = 0
                text_para = ""
    return l

# tokenizer = LlamaTokenizer.from_pretrained("chainyo/alpaca-lora-7b")
# model = LlamaForCausalLM.from_pretrained(
#     "chainyo/alpaca-lora-7b",
#     load_in_8bit=True,
#     torch_dtype=torch.float16,
#     device_map="auto",
# )
tokenizer = AutoTokenizer.from_pretrained("s-JoL/Open-Llama-V1", use_fast=False)
model = AutoModelForCausalLM.from_pretrained("s-JoL/Open-Llama-V1").cuda()

def generate_prompt(instruction: str, input_ctxt: str = None) -> str:
    if input_ctxt:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Input:
{input_ctxt}
### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Response:"""

# Default generation config; it must be defined before evaluate() is called
# in the QA loop below (it was previously commented out, which would have
# raised a NameError on the first call)
generation_config = GenerationConfig(
    temperature=0.1,
    top_p=0.75,
    num_beams=4,
)

def evaluate(instruction, input_ctxt):
    prompt = generate_prompt(instruction, input_ctxt)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].cuda()
    generation_output = model.generate(
        input_ids=input_ids,
        generation_config=generation_config,
        return_dict_in_generate=True,
        output_scores=True,
        # max_new_tokens=128
    )
    for s in generation_output.sequences:
        output = tokenizer.decode(s)
    response = output.split("### Response:")[1].strip()
    return response

# paraphrasing and reasoning test
# question="What was Devrup searching for?"
# context="Devrup was a sailor who came from the far away seas of North America and wanted to explore the world in search of gold"
# evaluate(question,context)
embedding_model = SentenceTransformer(
    "flax-sentence-embeddings/all_datasets_v4_MiniLM-L6"
)
# df_full=pd.read_csv("/kaggle/input/alpaca/embedded_pdf.csv")
df_test = pd.read_csv("/kaggle/input/alpaca-test/test_set_latest.csv")
pdf_nam = "/kaggle/input/aetna-policy/Aetna_policy.pdf"
l = get_pdf_embeddings(embedding_model, pdf_nam)
df_full = pd.DataFrame(l)

def get_context(question, embedding_model, df):
    """Given a question, the embedding model used, and an embedded dataframe
    containing pdf pages as vectors, calculate cosine similarity and return
    the context most relevant to the question"""
    text_embbedding = embedding_model.encode(question)

    def cosine_col_gen(X):
        return np.dot(X, text_embbedding) / (norm(X) * norm(text_embbedding))

    df["cosine_similarity"] = df["para_embeddings"].apply(cosine_col_gen)
    df = df.sort_values("cosine_similarity", ascending=False)
    context = ""
    df_temp = df.head(2).sort_index()
    for item in df_temp.Para_text.values.tolist():
        context += item
    return context

def get_answers(
    question, embedding_model, df_full, context=True, custom_context="None"
):
    if context:
        context = get_context(question, embedding_model, df_full)
    else:
        # use the caller-supplied context when one is given (previously
        # custom_context was ignored), otherwise fall back to no context
        context = custom_context if custom_context != "None" else None
    # prompt=context+question
    response = evaluate(question, context)
    return response, context

l = []
for question, answer in zip(df_test["question"], df_test["human_ans_spans"]):
    print("question: " + question)
    d = {}
    response, context = get_answers(question, embedding_model, df_full, context=False)
    d["Question"] = question
    d["Real_answer"] = answer
    d["Predicted_answer"] = response
    # d['Context_given']=context
    l.append(d)
    print("#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*")
    # print("context provided: "+context)
    print("#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*")
    print("response: " + response)
# 128
generation_config = GenerationConfig(
    temperature=50,
    top_p=0.2,
    # top_k=40,
    # num_beams=4,
    max_new_tokens=16,
)
question = "Write a sad story involving a cat and a mouse and a piece of fish"
context = None
evaluate(question, context)
# 1024 tokens
generation_config = GenerationConfig(
    temperature=0.5,
    top_p=0.3,
    # top_k=0,
    # num_beams=4,
    max_new_tokens=512,
)
question = "Write a poem in Paulo Coelho style"
context = None
print(evaluate(question, context))
# 1024 tokens
generation_config = GenerationConfig(
    temperature=0.9,
    top_p=0.3,
    # top_k=0,
    # num_beams=4,
    max_new_tokens=512,
)
question = "Write a poem in Paulo Coelho style"
context = None
print(evaluate(question, context))
generation_config = GenerationConfig(
    temperature=0,
    top_p=0.2,
    # top_k=40,
    # num_beams=4,
    max_new_tokens=128,
)
question = "A sad story involving a cat and a mouse and a piece of fish"
context = None
evaluate(question, context)
pd.DataFrame(l).to_csv("alpaca_results_no_context.csv", index=False)
# proper context predictions (a generator expression was previously passed
# to np.array, which does not build an element-wise array; list
# comprehensions are used instead, keeping only the response text)
df_test["prediction_text_proper_context"] = [
    get_answers(
        question, embedding_model, df_full, context=False, custom_context=context
    )[0]
    for question, context in zip(df_test["question"], df_test["context"])
]
# cosine context predictions
df_test["prediction_text_cosine_context"] = [
    get_answers(question, embedding_model, df_full)[0]
    for question in df_test["question"]
]
df_test.to_csv("/kaggle/working/test_results.csv", index=False)
df_test
false
0
2,164
0
2,164
2,164
129117160
<jupyter_start><jupyter_text>Suicide Attempts in Shandong, China
```
Data on serious suicide attempts in Shandong, China
A data frame with 2571 observations on the following 11 variables.
```

| Column | Description |
| --- | --- |
| Person_ID | ID number of victims |
| Hospitalised | Hospitalized? (no or yes) |
| Died | Died? (no or yes) |
| Urban | Urban area? (no, unknown, or yes) |
| Year | Year (2009, 2010, or 2011) |
| Month | Month (1=Jan through 12=December) |
| Sex | Sex (female or male) |
| Age | Age (years) |
| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |
| Occupation | One of ten occupation categories |
| method | One of nine possible methods |

### Details 
Data from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.

## Source
Sun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) "Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study," BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762
Kaggle dataset identifier: suicide-attempts-in-shandong-china
<jupyter_script># # Import The Libraries
# *Pandas
# *Numpy
# *Seaborn
# *Matplotlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# # Read the dataset
#
data = pd.read_csv(
    "/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv",
    index_col="Person_ID",
)
data = data.drop("Unnamed: 0", axis=1)
data.head()
data.info()
data.columns
data.describe().transpose()
# # Data Cleaning
data.isnull().sum()
# # Data Visualization
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(x="Hospitalised", data=data, hue="Died")
plt.title("People Died In Hospital")
# It looks like most of the people who were admitted to hospital died.
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(x="Hospitalised", data=data, hue="Urban")
plt.title("People Admitted to Hospital (Urban vs Rural)")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(data=data, x="Died", hue="Year")
plt.title("People Died by Year (2009 to 2011)")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(data=data, y="Urban", hue="Died")
plt.title("Urban vs Rural Deaths in China (2009 to 2011)")
# Here we see that most of the people who died lived in rural areas
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(data=data, y="method", hue="Died")
plt.title("People Died by Method")
# Most of the people died due to pesticide
#
plt.figure(facecolor="orange", figsize=(12, 7))
sns.barplot(y="Age", x="Month", data=data, hue="Died")
plt.title("Monthly Death Rate")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.barplot(y="Age", x="Month", data=data, hue="Urban")
plt.title("Monthly Death Rate (Urban)")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.barplot(y="Age", x="Year", data=data, hue="Died")
plt.title("Yearly Deaths")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.barplot(y="Age", x="Year", data=data, hue="Urban")
plt.title("Yearly Deaths (Urban)")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(y="Occupation", data=data, hue="Died")
plt.title("Occupation of People (Died)")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(y="Education", data=data, hue="Died")
plt.title("Education of People (Died)")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/117/129117160.ipynb
suicide-attempts-in-shandong-china
utkarshx27
[{"Id": 129117160, "ScriptId": 38384220, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13879337, "CreationDate": "05/11/2023 06:15:13", "VersionNumber": 1.0, "Title": "Sucide Attempts in China,ShandongEDA.", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 101.0, "LinesInsertedFromPrevious": 101.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
[{"Id": 184888010, "KernelVersionId": 129117160, "SourceDatasetVersionId": 5617993}]
[{"Id": 5617993, "DatasetId": 3230370, "DatasourceVersionId": 5693173, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/06/2023 11:54:22", "VersionNumber": 1.0, "Title": "Suicide Attempts in Shandong, China", "Slug": "suicide-attempts-in-shandong-china", "Subtitle": "Serious Suicide Attempts in Shandong, China: Three-Year Study", "Description": "```\nData on serious suicide attempts in Shandong, China\nA data frame with 2571 observations on the following 11 variables.\n```\n\n| Column | Description |\n| --- | --- |\n| Person_ID | ID number of victims |\n| Hospitalised | Hospitalized? (no or yes) |\n| Died | Died? (no or yes) |\n| Urban | Urban area? (no, unknown, or yes) |\n| Year | Year (2009, 2010, or 2011) |\n| Month | Month (1=Jan through 12=December) |\n| Sex | Sex (female or male) |\n| Age | Age (years) |\n| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |\n| Occupation | One of ten occupation categories |\n| method | One of nine possible methods |\n\n### Details \nData from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.\n\n## Source\nSun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) \"Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study,\" BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3230370, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5617993.0, "CurrentDatasourceVersionId": 5693173.0, "ForumId": 3295509, "Type": 2, "CreationDate": "05/06/2023 11:54:22", "LastActivityDate": "05/06/2023", "TotalViews": 8885, "TotalDownloads": 1402, "TotalVotes": 42, "TotalKernels": 12}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
# # Import The Libraries
# *Pandas
# *Numpy
# *Seaborn
# *Matplotlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# # Read the dataset
#
data = pd.read_csv(
    "/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv",
    index_col="Person_ID",
)
data = data.drop("Unnamed: 0", axis=1)
data.head()
data.info()
data.columns
data.describe().transpose()
# # Data Cleaning
data.isnull().sum()
# # Data Visualization
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(x="Hospitalised", data=data, hue="Died")
plt.title("People Died In Hospital")
# It looks like most of the people who were admitted to hospital died.
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(x="Hospitalised", data=data, hue="Urban")
plt.title("People Admitted to Hospital (Urban vs Rural)")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(data=data, x="Died", hue="Year")
plt.title("People Died by Year (2009 to 2011)")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(data=data, y="Urban", hue="Died")
plt.title("Urban vs Rural Deaths in China (2009 to 2011)")
# Here we see that most of the people who died lived in rural areas
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(data=data, y="method", hue="Died")
plt.title("People Died by Method")
# Most of the people died due to pesticide
#
plt.figure(facecolor="orange", figsize=(12, 7))
sns.barplot(y="Age", x="Month", data=data, hue="Died")
plt.title("Monthly Death Rate")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.barplot(y="Age", x="Month", data=data, hue="Urban")
plt.title("Monthly Death Rate (Urban)")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.barplot(y="Age", x="Year", data=data, hue="Died")
plt.title("Yearly Deaths")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.barplot(y="Age", x="Year", data=data, hue="Urban")
plt.title("Yearly Deaths (Urban)")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(y="Occupation", data=data, hue="Died")
plt.title("Occupation of People (Died)")
plt.figure(facecolor="orange", figsize=(12, 7))
sns.countplot(y="Education", data=data, hue="Died")
plt.title("Education of People (Died)")
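A small numeric companion to the method countplot above: the case-fatality rate per method. This is an added sketch assuming `data` as loaded in this notebook, with Died coded as "yes"/"no" per the dataset description.

fatality = (
    data.assign(died=data["Died"].eq("yes"))
    .groupby("method")["died"]
    .agg(attempts="size", fatality_rate="mean")
    .sort_values("fatality_rate", ascending=False)
)
# attempts per method alongside the share of attempts that were fatal
print(fatality)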
false
1
838
7
1,250
838
129117009
<jupyter_start><jupyter_text>Suicide Attempts in Shandong, China
```
Data on serious suicide attempts in Shandong, China
A data frame with 2571 observations on the following 11 variables.
```

| Column | Description |
| --- | --- |
| Person_ID | ID number of victims |
| Hospitalised | Hospitalized? (no or yes) |
| Died | Died? (no or yes) |
| Urban | Urban area? (no, unknown, or yes) |
| Year | Year (2009, 2010, or 2011) |
| Month | Month (1=Jan through 12=December) |
| Sex | Sex (female or male) |
| Age | Age (years) |
| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |
| Occupation | One of ten occupation categories |
| method | One of nine possible methods |

### Details 
Data from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.

## Source
Sun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) "Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study," BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762
Kaggle dataset identifier: suicide-attempts-in-shandong-china
<jupyter_script># # Importing Libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv(
    "/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv",
    index_col="Person_ID",
)
df.head()
df = df.drop("Unnamed: 0", axis=1)
df.head()
df.info()
df.describe()
# # Visualization and EDA
sns.countplot(x="Hospitalised", data=df, hue="Died")
# Hospitalised people have about a 20% chance of surviving; most of those hospitalised died
sns.countplot(x="Died", data=df, hue="Sex", palette="Set1")
# The female survival rate is a little higher than that of males
sns.displot(x="Age", data=df, bins=50, kde=True)
# People from every age group attempted suicide; there is no particular age range in Shandong, China
cols = df.columns
df[cols].nunique()
sns.countplot(x="Education", data=df, hue="Died", palette="Set1")
# There is something strange about the secondary-education group. People with secondary education attempted suicide the most
# and also survived the most, while primary-educated and illiterate people died more.
plt.figure(figsize=(8, 8))
sns.countplot(y="Occupation", data=df)
plt.title("Suicide attempts by occupation")
plt.figure(figsize=(8, 8))
sns.countplot(y="Occupation", data=df, hue="Died")
plt.title("Suicide attempts by occupation")
df["method"].value_counts()
sns.countplot(y="method", data=df)
# Farmers attempted suicide the most, and accordingly the most common method is pesticide, which farmers use in their fields.
sns.countplot(x="Month", data=df, palette="Set2")
plt.title("Suicide attempts by month")
sns.countplot(x="Year", data=df, hue="Died", palette="Set3")
sns.countplot(x="Urban", data=df, palette="Set1")
sns.countplot(x="Hospitalised", data=df, hue="Occupation")
sns.countplot(x="Urban", data=df, hue="Education")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/117/129117009.ipynb
suicide-attempts-in-shandong-china
utkarshx27
[{"Id": 129117009, "ScriptId": 38346210, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12359095, "CreationDate": "05/11/2023 06:13:39", "VersionNumber": 1.0, "Title": "Visualization(Pandas,Seaborn)", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 67.0, "LinesInsertedFromPrevious": 67.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
[{"Id": 184887699, "KernelVersionId": 129117009, "SourceDatasetVersionId": 5617993}]
[{"Id": 5617993, "DatasetId": 3230370, "DatasourceVersionId": 5693173, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/06/2023 11:54:22", "VersionNumber": 1.0, "Title": "Suicide Attempts in Shandong, China", "Slug": "suicide-attempts-in-shandong-china", "Subtitle": "Serious Suicide Attempts in Shandong, China: Three-Year Study", "Description": "```\nData on serious suicide attempts in Shandong, China\nA data frame with 2571 observations on the following 11 variables.\n```\n\n| Column | Description |\n| --- | --- |\n| Person_ID | ID number of victims |\n| Hospitalised | Hospitalized? (no or yes) |\n| Died | Died? (no or yes) |\n| Urban | Urban area? (no, unknown, or yes) |\n| Year | Year (2009, 2010, or 2011) |\n| Month | Month (1=Jan through 12=December) |\n| Sex | Sex (female or male) |\n| Age | Age (years) |\n| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |\n| Occupation | One of ten occupation categories |\n| method | One of nine possible methods |\n\n### Details \nData from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.\n\n## Source\nSun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) \"Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study,\" BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3230370, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5617993.0, "CurrentDatasourceVersionId": 5693173.0, "ForumId": 3295509, "Type": 2, "CreationDate": "05/06/2023 11:54:22", "LastActivityDate": "05/06/2023", "TotalViews": 8885, "TotalDownloads": 1402, "TotalVotes": 42, "TotalKernels": 12}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
# # Importing Libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv(
    "/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv",
    index_col="Person_ID",
)
df.head()
df = df.drop("Unnamed: 0", axis=1)
df.head()
df.info()
df.describe()
# # Visualization and EDA
sns.countplot(x="Hospitalised", data=df, hue="Died")
# Hospitalised people have only about a 20% chance of survival; most people die in the hospitals
sns.countplot(x="Died", data=df, hue="Sex", palette="Set1")
# The female survival rate is slightly higher than the male one
sns.displot(x="Age", data=df, bins=50, kde=True)
# People from every age group attempted suicide; there is no particular age range for it in Shandong, China
cols = df.columns
df[cols].nunique()
sns.countplot(x="Education", data=df, hue="Died", palette="Set1")
# There is something strange about the Secondary education group: they attempted suicide the most
# and also survived the most, while primary-educated and illiterate people died more.
plt.figure(figsize=(8, 8))
sns.countplot(y="Occupation", data=df)
plt.title("Suicide attempts by occupation")
plt.figure(figsize=(8, 8))
sns.countplot(y="Occupation", data=df, hue="Died")
plt.title("Suicide attempts by occupation")
df["method"].value_counts()
sns.countplot(y="method", data=df)
# Farmers attempted suicide the most, and accordingly the most common method is pesticide, which farmers use in their fields.
sns.countplot(x="Month", data=df, palette="Set2")
plt.title("Suicide attempts by month")
sns.countplot(x="Year", data=df, hue="Died", palette="Set3")
sns.countplot(x="Urban", data=df, palette="Set1")
sns.countplot(x="Hospitalised", data=df, hue="Occupation")
sns.countplot(x="Urban", data=df, hue="Education")
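# The Education/Died pattern noted above is easier to judge with rates than
# with raw counts; a minimal sketch, assuming Died is coded as "yes"/"no":
print(pd.crosstab(df["Education"], df["Died"], normalize="index").round(2))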
false
1
619
4
1,031
619
129117170
<jupyter_start><jupyter_text>Netflix Data: Cleaning, Analysis and Visualization Netflix is a popular streaming service that offers a vast catalog of movies, TV shows, and original contents. This dataset is a cleaned version of the original version which can be found [here](https://www.kaggle.com/datasets/shivamb/netflix-shows). The data consist of contents added to Netflix from 2008 to 2021. The oldest content is as old as 1925 and the newest as 2021. This dataset will be cleaned with PostgreSQL and visualized with Tableau. The purpose of this dataset is to test my data cleaning and visualization skills. The cleaned data can be found below and the Tableau dashboard can be found [here](https://public.tableau.com/app/profile/abdulrasaq.ariyo/viz/NetflixTVShowsMovies_16615029026580/NetflixDashboard) . ## Data Cleaning We are going to: 1. Treat the Nulls 2. Treat the duplicates 3. Populate missing rows 4. Drop unneeded columns 5. Split columns Extra steps and more explanation on the process will be explained through the code comments ``` --View dataset SELECT * FROM netflix; ``` ``` --The show_id column is the unique id for the dataset, therefore we are going to check for duplicates SELECT show_id, COUNT(*) FROM netflix GROUP BY show_id ORDER BY show_id DESC; --No duplicates ``` ``` --Check null values across columns SELECT COUNT(*) FILTER (WHERE show_id IS NULL) AS showid_nulls, COUNT(*) FILTER (WHERE type IS NULL) AS type_nulls, COUNT(*) FILTER (WHERE title IS NULL) AS title_nulls, COUNT(*) FILTER (WHERE director IS NULL) AS director_nulls, COUNT(*) FILTER (WHERE movie_cast IS NULL) AS movie_cast_nulls, COUNT(*) FILTER (WHERE country IS NULL) AS country_nulls, COUNT(*) FILTER (WHERE date_added IS NULL) AS date_addes_nulls, COUNT(*) FILTER (WHERE release_year IS NULL) AS release_year_nulls, COUNT(*) FILTER (WHERE rating IS NULL) AS rating_nulls, COUNT(*) FILTER (WHERE duration IS NULL) AS duration_nulls, COUNT(*) FILTER (WHERE listed_in IS NULL) AS listed_in_nulls, COUNT(*) FILTER (WHERE description IS NULL) AS description_nulls FROM netflix; ``` ``` We can see that there are NULLS. director_nulls = 2634 movie_cast_nulls = 825 country_nulls = 831 date_added_nulls = 10 rating_nulls = 4 duration_nulls = 3 ``` The director column nulls is about 30% of the whole column, therefore I will not delete them. I will rather find another column to populate it. To populate the director column, we want to find out if there is relationship between movie_cast column and director column ``` -- Below, we find out if some directors are likely to work with particular cast WITH cte AS ( SELECT title, CONCAT(director, '---', movie_cast) AS director_cast FROM netflix ) SELECT director_cast, COUNT(*) AS count FROM cte GROUP BY director_cast HAVING COUNT(*) &gt; 1 ORDER BY COUNT(*) DESC; With this, we can now populate NULL rows in directors using their record with movie_cast ``` ``` UPDATE netflix SET director = 'Alastair Fothergill' WHERE movie_cast = 'David Attenborough' AND director IS NULL ; --Repeat this step to populate the rest of the director nulls --Populate the rest of the NULL in director as "Not Given" UPDATE netflix SET director = 'Not Given' WHERE director IS NULL; --When I was doing this, I found a less complex and faster way to populate a column which I will use next ``` Just like the director column, I will not delete the nulls in country. 
Since the country column is related to director and movie, we are going to populate the country column with the director column ``` --Populate the country using the director column SELECT COALESCE(nt.country,nt2.country) FROM netflix AS nt JOIN netflix AS nt2 ON nt.director = nt2.director AND nt.show_id &lt;&gt; nt2.show_id WHERE nt.country IS NULL; UPDATE netflix SET country = nt2.country FROM netflix AS nt2 WHERE netflix.director = nt2.director and netflix.show_id &lt;&gt; nt2.show_id AND netflix.country IS NULL; --To confirm if there are still directors linked to country that refuse to update SELECT director, country, date_added FROM netflix WHERE country IS NULL; --Populate the rest of the NULL in director as "Not Given" UPDATE netflix SET country = 'Not Given' WHERE country IS NULL; ``` The date_added rows nulls is just 10 out of over 8000 rows, deleting them cannot affect our analysis or visualization ``` --Show date_added nulls SELECT show_id, date_added FROM netflix_clean WHERE date_added IS NULL; --DELETE nulls DELETE FROM netflix WHERE show_id IN ('6797', 's6067', 's6175', 's6807', 's6902', 's7255', 's7197', 's7407', 's7848', 's8183'); ``` rating nulls is 4. Delete them ``` --Show rating NULLS SELECT show_id, rating FROM netflix_clean WHERE date_added IS NULL; --Delete the nulls, and show deleted fields DELETE FROM netflix WHERE show_id IN (SELECT show_id FROM netflix WHERE rating IS NULL) RETURNING *; ``` --duration nulls is 4. Delete them ``` DELETE FROM netflix WHERE show_id IN (SELECT show_id FROM netflix WHERE duration IS NULL); ``` Now run the query to show the number of nulls in each column to confirm if there are still nulls. After this, run the query to confirm the row number in each column is the same ``` --Check to confirm the number of rows are the same(NO NULL) SELECT count(*) filter (where show_id IS NOT NULL) AS showid_nulls, count(*) filter (where type IS NOT NULL) AS type_nulls, count(*) filter (where title IS NOT NULL) AS title_nulls, count(*) filter (where director IS NOT NULL) AS director_nulls, count(*) filter (where country IS NOT NULL) AS country_nulls, count(*) filter (where date_added IS NOT NULL) AS date_addes_nulls, count(*) filter (where release_year IS NOT NULL) AS release_year_nulls, count(*) filter (where rating IS NOT NULL) AS rating_nulls, count(*) filter (where duration IS NOT NULL) AS duration_nulls, count(*) filter (where listed_in IS NOT NULL) AS listed_in_nulls FROM netflix; --Total number of rows are the same in all columns ``` We can drop the description and movie_cast column because they are not needed for our analysis or visualization task. ``` --DROP unneeded columns ALTER TABLE netflix DROP COLUMN movie_cast, DROP COLUMN description; ``` Some of the rows in country column has multiple countries, for my visualization, I only need one country per row to make my map visualization clean and easy. Therefore, I am going to split the country column and retain the first country by the left which I believe is the original country of the movie ``` SELECT *, SPLIT_PART(country,',',1) AS countryy, SPLIT_PART(country,',',2), SPLIT_PART(country,',',4), SPLIT_PART(country,',',5), SPLIT_PART(country,',',6), SPLIT_PART(country,',',7), SPLIT_PART(country,',',8), SPLIT_PART(country,',',9), SPLIT_PART(country,',',10) FROM netflix; -- NOW lets update the table ALTER TABLE netflix ADD country1 varchar(500); UPDATE netflix SET country1 = SPLIT_PART(country, ',', 1); --This will create a column named country1 and Update it with the first split country. 
```
Delete the country column that has multiple country entries
```
--Delete column
ALTER TABLE netflix 
DROP COLUMN country;
```
Rename the country1 column to country
```
ALTER TABLE netflix 
RENAME COLUMN country1 TO country;
```
## Data Visualization
After cleaning, the dataset is set for some analysis and visualization with Tableau. 
**Note: In the visualization captions, Contents means Movies and TV shows, and Content may either mean Movie or TV Show**. 
**Sheet 1. Content type in percentage**
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F1c95dee22870057541bc3c1cce7b1a36%2FType%20percent.png?generation=1661603826265148&alt=media)
This first sheet shows the two categories of content in the dataset, which are Movie and Tv show. 
- As we can see, the majority of the content is Movie, which takes 69.9%. 
- There are more details in the tooltip, which shows the exact count of Movie and Tv show
**Sheet 2. Movie & TV Show by Country**
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F511f5a3f07f2fa9d8faea77d1dd21180%2FNumber%20of%20shows%20by%20map.png?generation=1661604888232420&alt=media)
This shows the total amount of Movies and Tv shows per country within the given period of time (2008 - 2021). This can be noted by the size of the coloured circle in the map. 
- We can see that the United States of America has the largest size, followed by India and the United Kingdom. 
- In the Tableau hosted dashboard/sheet, there is a filter for the years between 2008 and 2021 to calculate the yearly record.
To give an alternate and clearer view, the Movie & TV shows by country bar chart is below
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F64e9f79965e62a4bb63b04acc835a07c%2FNumber%20of%20shows%20by%20bar.png?generation=1661609485785468&alt=media)
**Sheet 3. Number of Contents Added through the Years**
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2Fe02290507d0be382870f6651e3682cd1%2FNumber%20of%20Contents%20added%20by%20year.png?generation=1661605691430129&alt=media)
This time series chart shows the total number of contents added to Netflix all through the given years (2008 - 2021)
- It shows that most movies and tv shows on Netflix were added in 2019
- In the Tableau sheet, there is a filter to know how many Movies and Tv shows were added in each month of the year 
**Sheet 4. Top Directors**
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F0fa58900b62df123b690da63b6111a3a%2FDirector.png?generation=1661606812783874&alt=media)
This chart shows the top 10 directors with the most contents on Netflix, counting the Movies and Tv shows in their catalogue. 
- We can see that most of these directors' contents are movies. 
- We can also note that the duo of Raul Campos and Jan Suter are fond of working together and have directed 18 movies on Netflix. 
**Sheet 5. Top Genres** 
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2Fbc458d5885c3d7bcd3e690962c5cc2c3%2FTop%20Genres.png?generation=1661607262740686&alt=media)
This chart shows the genres with the highest numbers on Netflix. 
- We can see that Drama & International movies, followed by Documentary, have the highest number of contents on Netflix within the period.
**Sheet 6. 
Top Ratings**
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F181df392cb0006559cd9fb19a29cadef%2FRating.png?generation=1661607535247137&alt=media)
Rating is a system to rate a motion picture's suitability for certain audiences based on its content. This chart shows the top ratings on Netflix
- We can note that most contents on Netflix are rated TV-MA. TV-MA, assigned in the United States by the TV Parental Guidelines, signifies content for mature audiences. 
**Sheet 7. Oldest Contents on Netflix by Content Release year**
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F263493038e8dacd330c9e54aed2c467b%2FOldest%20shows%20on%20netflix.png?generation=1661607864455871&alt=media)
This table shows the 10 oldest movies and tv shows on Netflix
- The oldest is as old as 1925
**Sheet 8. Content Types over the Years**
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F520da629aceef21a7af890198897c58e%2FContent%20Type%20over%20the%20years.png?generation=1661608071825961&alt=media)
This line chart compares the Movie and Tv show contents added to Netflix all through the years.
- We can see that more movies have always been added. 
- In 2013, the numbers of contents added to Netflix for both were almost the same, with Movies having 6 contents that year and Tv shows having 5.
- It shows that in the first 5 years, only movies were added to Netflix. 
**Sheet 9. Release Years with Highest Contents**
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F063881abc476d466ad983f96b7f23284%2FRelease_years%20with%20highest%20movie.png?generation=1661608527082875&alt=media)
This chart shows the Movie and Tv show production years with the highest number of contents on Netflix. We focus on the top 10 release/production years. 
- We can see that from 2012 to 2018, Netflix added mostly recent contents; they made sure the number of contents per recent release year was higher than for older release years. Then in 2019, it started dropping; this may be due to Covid-19, but further analysis may be needed to determine this. 
And with this, I have come to the end of this exercise. As I said, this is just an exercise to test my skills as I look forward to getting better. Thanks for following through. Cheers!
Kaggle dataset identifier: netflix-data-cleaning-analysis-and-visualization
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g.
pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
    "/kaggle/input/netflix-data-cleaning-analysis-and-visualization/netflix1.csv"
)
df.head()
df.info()
df.shape
df.describe(include="all")
df.nunique(axis=0)
type_of_show = df.groupby("type")["show_id"].count()
label = ["Movies", "TV Shows"]
plt.pie(type_of_show, labels=label, autopct="%1.2f%%")
plt.title("Proportion of show types on Netflix")
plt.show()
per_country = df.groupby("country")["show_id"].count()
per_country.sort_values(ascending=False).head(10).plot(kind="bar")
plt.title("Top ten countries with the most shows on Netflix")
plt.xlabel("Country")
plt.ylabel("No of Shows")
plt.show()
per_year = df.groupby("release_year")["show_id"].count()
per_year.sort_values(ascending=False).head(10).plot(kind="bar")
plt.title("Release years with the most shows on Netflix")
plt.xlabel("Release Year")
plt.ylabel("No of Shows")
plt.show()
listed_in = df.groupby("listed_in")["show_id"].count()
listed_in.sort_values(ascending=False).head(10).plot(kind="barh")
plt.title("Number of shows listed in different genres")
plt.ylabel("Listed In")
plt.xlabel("No of Shows")
plt.show()
df.groupby("director")["show_id"].count().sort_values(ascending=False).head(10)
genre_in_usa = (
    df[df["country"] == "United States"].groupby("listed_in")["show_id"].count()
)
genre_in_usa.sort_values(ascending=False).head(10).plot(kind="barh")
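# The dataset description above builds a "contents added per year" view in
# Tableau; a rough pandas equivalent, assuming the cleaned file keeps a
# date_added column in a format pandas can parse:
year_added = pd.to_datetime(df["date_added"], errors="coerce").dt.year
year_added.value_counts().sort_index().plot(kind="line")
plt.title("Contents added to Netflix per year")
plt.show()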
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/117/129117170.ipynb
netflix-data-cleaning-analysis-and-visualization
ariyoomotade
[{"Id": 129117170, "ScriptId": 38384436, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14735671, "CreationDate": "05/11/2023 06:15:23", "VersionNumber": 1.0, "Title": "notebook37d983eaf6", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 64.0, "LinesInsertedFromPrevious": 64.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 184888021, "KernelVersionId": 129117170, "SourceDatasetVersionId": 4123716}]
[{"Id": 4123716, "DatasetId": 2437124, "DatasourceVersionId": 4180064, "CreatorUserId": 10322697, "LicenseName": "CC0: Public Domain", "CreationDate": "08/26/2022 09:25:43", "VersionNumber": 1.0, "Title": "Netflix Data: Cleaning, Analysis and Visualization", "Slug": "netflix-data-cleaning-analysis-and-visualization", "Subtitle": "Cleaning and Visualization with Pgsql and Tableau", "Description": "Netflix is a popular streaming service that offers a vast catalog of movies, TV shows, and original contents. This dataset is a cleaned version of the original version which can be found [here](https://www.kaggle.com/datasets/shivamb/netflix-shows). The data consist of contents added to Netflix from 2008 to 2021. The oldest content is as old as 1925 and the newest as 2021. This dataset will be cleaned with PostgreSQL and visualized with Tableau. The purpose of this dataset is to test my data cleaning and visualization skills. The cleaned data can be found below and the Tableau dashboard can be found [here](https://public.tableau.com/app/profile/abdulrasaq.ariyo/viz/NetflixTVShowsMovies_16615029026580/NetflixDashboard) . \n\n## Data Cleaning\nWe are going to:\n1. Treat the Nulls \n2. Treat the duplicates\n3. Populate missing rows\n4. Drop unneeded columns\n5. Split columns\nExtra steps and more explanation on the process will be explained through the code comments\n\n```\n--View dataset\n\nSELECT * \nFROM netflix;\n\n```\n\n```\n--The show_id column is the unique id for the dataset, therefore we are going to check for duplicates\n \nSELECT show_id, COUNT(*) \nFROM netflix \nGROUP BY show_id \nORDER BY show_id DESC;\n\n--No duplicates\n```\n\n```\n--Check null values across columns\n\nSELECT COUNT(*) FILTER (WHERE show_id IS NULL) AS showid_nulls,\n COUNT(*) FILTER (WHERE type IS NULL) AS type_nulls,\n COUNT(*) FILTER (WHERE title IS NULL) AS title_nulls,\n COUNT(*) FILTER (WHERE director IS NULL) AS director_nulls,\n\t COUNT(*) FILTER (WHERE movie_cast IS NULL) AS movie_cast_nulls,\n\t COUNT(*) FILTER (WHERE country IS NULL) AS country_nulls,\n COUNT(*) FILTER (WHERE date_added IS NULL) AS date_addes_nulls,\n COUNT(*) FILTER (WHERE release_year IS NULL) AS release_year_nulls,\n COUNT(*) FILTER (WHERE rating IS NULL) AS rating_nulls,\n\t COUNT(*) FILTER (WHERE duration IS NULL) AS duration_nulls,\n COUNT(*) FILTER (WHERE listed_in IS NULL) AS listed_in_nulls,\n\t COUNT(*) FILTER (WHERE description IS NULL) AS description_nulls\nFROM netflix;\n```\n```\nWe can see that there are NULLS. \ndirector_nulls = 2634\nmovie_cast_nulls = 825\ncountry_nulls = 831\ndate_added_nulls = 10\nrating_nulls = 4\nduration_nulls = 3 \n```\n\nThe director column nulls is about 30% of the whole column, therefore I will not delete them. I will rather find another column to populate it. 
To populate the director column, we want to find out if there is relationship between movie_cast column and director column\n\n\n``` \n-- Below, we find out if some directors are likely to work with particular cast\n\nWITH cte AS\n(\nSELECT title, CONCAT(director, '---', movie_cast) AS director_cast \nFROM netflix\n)\n\nSELECT director_cast, COUNT(*) AS count\nFROM cte\nGROUP BY director_cast\nHAVING COUNT(*) &gt; 1\nORDER BY COUNT(*) DESC;\n\nWith this, we can now populate NULL rows in directors \nusing their record with movie_cast \n```\n```\nUPDATE netflix \nSET director = 'Alastair Fothergill'\nWHERE movie_cast = 'David Attenborough'\nAND director IS NULL ;\n\n--Repeat this step to populate the rest of the director nulls\n--Populate the rest of the NULL in director as \"Not Given\"\n\nUPDATE netflix \nSET director = 'Not Given'\nWHERE director IS NULL;\n\n--When I was doing this, I found a less complex and faster way to populate a column which I will use next\n```\n\nJust like the director column, I will not delete the nulls in country. Since the country column is related to director and movie, we are going to populate the country column with the director column\n\n```\n--Populate the country using the director column\n\nSELECT COALESCE(nt.country,nt2.country) \nFROM netflix AS nt\nJOIN netflix AS nt2 \nON nt.director = nt2.director \nAND nt.show_id &lt;&gt; nt2.show_id\nWHERE nt.country IS NULL;\nUPDATE netflix\nSET country = nt2.country\nFROM netflix AS nt2\nWHERE netflix.director = nt2.director and netflix.show_id &lt;&gt; nt2.show_id \nAND netflix.country IS NULL;\n\n\n--To confirm if there are still directors linked to country that refuse to update\n\nSELECT director, country, date_added\nFROM netflix\nWHERE country IS NULL;\n\n--Populate the rest of the NULL in director as \"Not Given\"\n\nUPDATE netflix \nSET country = 'Not Given'\nWHERE country IS NULL;\n```\n\nThe date_added rows nulls is just 10 out of over 8000 rows, deleting them cannot affect our analysis or visualization\n\n```\n--Show date_added nulls\n\nSELECT show_id, date_added\nFROM netflix_clean\nWHERE date_added IS NULL;\n\n--DELETE nulls\n\nDELETE FROM netflix\nWHERE show_id \nIN ('6797', 's6067', 's6175', 's6807', 's6902', 's7255', 's7197', 's7407', 's7848', 's8183');\n\n```\n\nrating nulls is 4. Delete them\n```\n--Show rating NULLS\n\nSELECT show_id, rating\nFROM netflix_clean\nWHERE date_added IS NULL;\n\n--Delete the nulls, and show deleted fields\nDELETE FROM netflix \nWHERE show_id \nIN (SELECT show_id FROM netflix WHERE rating IS NULL)\nRETURNING *;\n```\n\n--duration nulls is 4. Delete them\n```\n\nDELETE FROM netflix \nWHERE show_id \nIN (SELECT show_id FROM netflix WHERE duration IS NULL);\n```\nNow run the query to show the number of nulls in each column to confirm if there are still nulls. 
After this, run the query to confirm the row number in each column is the same\n\n```\n--Check to confirm the number of rows are the same(NO NULL)\n\nSELECT count(*) filter (where show_id IS NOT NULL) AS showid_nulls,\n count(*) filter (where type IS NOT NULL) AS type_nulls,\n count(*) filter (where title IS NOT NULL) AS title_nulls,\n count(*) filter (where director IS NOT NULL) AS director_nulls,\n\t count(*) filter (where country IS NOT NULL) AS country_nulls,\n count(*) filter (where date_added IS NOT NULL) AS date_addes_nulls,\n count(*) filter (where release_year IS NOT NULL) AS release_year_nulls,\n count(*) filter (where rating IS NOT NULL) AS rating_nulls,\n\t count(*) filter (where duration IS NOT NULL) AS duration_nulls,\n count(*) filter (where listed_in IS NOT NULL) AS listed_in_nulls\nFROM netflix;\n\n --Total number of rows are the same in all columns\n```\nWe can drop the description and movie_cast column because they are not needed for our analysis or visualization task. \n```\n--DROP unneeded columns\n\nALTER TABLE netflix\nDROP COLUMN movie_cast, \nDROP COLUMN description;\n```\nSome of the rows in country column has multiple countries, for my visualization, I only need one country per row to make my map visualization clean and easy. Therefore, I am going to split the country column and retain the first country by the left which I believe is the original country of the movie\n```\nSELECT *,\n\t SPLIT_PART(country,',',1) AS countryy, \n SPLIT_PART(country,',',2),\n\t SPLIT_PART(country,',',4),\n\t SPLIT_PART(country,',',5),\n\t SPLIT_PART(country,',',6),\n\t SPLIT_PART(country,',',7),\n\t SPLIT_PART(country,',',8),\n\t SPLIT_PART(country,',',9),\n\t SPLIT_PART(country,',',10) \n\t \nFROM netflix;\n\t \n-- NOW lets update the table\n\nALTER TABLE netflix \nADD country1 varchar(500);\nUPDATE netflix \nSET country1 = SPLIT_PART(country, ',', 1);\n\n--This will create a column named country1 and Update it with the first split country.\n```\n\nDelete the country column that has multiple country entries\n```\n--Delete column\nALTER TABLE netflix \nDROP COLUMN country;\n```\nRename the country1 column to country\n```\nALTER TABLE netflix \nRENAME COLUMN country1 TO country;\n```\n\n## Data Visualization\nAfter cleaning, the dataset is set for some analysis and visualization with Tableau. \n\n**Note: In the visualization captions, Contents means Movies and TV shows, and Content may either mean Movie or TV Show**. \n\n**Sheet 1. Content type in percentage**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F1c95dee22870057541bc3c1cce7b1a36%2FType%20percent.png?generation=1661603826265148&alt=media)\n\nThis first sheet shows the two categories of content in the dataset which are Movie and Tv show. \n- As we can see the majority of the content is Movie which takes 69.9%. \n- There are more details in the tooltip which shows the exact count of Movie and Tv show\n\n\n**Sheet 2. Movie & TV Show by Country**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F511f5a3f07f2fa9d8faea77d1dd21180%2FNumber%20of%20shows%20by%20map.png?generation=1661604888232420&alt=media)\n\nThis shows the the total amount of Movies and Tv shows per country within the given period of time(2008 - 2021). This can be noted by the size of the coloured circle in the map. \n- We can see that the United State of America has the largest size, followed by India and the United Kingdom. 
\n- In the Tableau hosted dashboard/sheet, there is a filter for the years between 2008 and 2021 to calculate yearly record.\n\n To give an alternate and a clearer view. Movie & TV shows by country bar chart is below\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F64e9f79965e62a4bb63b04acc835a07c%2FNumber%20of%20shows%20by%20bar.png?generation=1661609485785468&alt=media)\n\n\n**Sheet 3. Number of Contents Added through the Years**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2Fe02290507d0be382870f6651e3682cd1%2FNumber%20of%20Contents%20added%20by%20year.png?generation=1661605691430129&alt=media)\n\nThis time series chart shows the total number of contents added to Netflix all through the given years (2008 - 2021)\n- It shows that most movies and tv shows on Netflix were added in 2019\n- In the Tableau sheet, there is a filter to know how much Movies and Tv shows were added in each month of the year \n\n\n**Sheet 4. Top Directors**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F0fa58900b62df123b690da63b6111a3a%2FDirector.png?generation=1661606812783874&alt=media)\n\nThis chart shows the top 10 directors with most contents on Netflix. This char shows the count of Movie and Tv shows in their catalouge. \n- We can see that most of these directors contents are movies. \n- We can also note that the duo of Raul Campos and Jan Suter are fond of working together and have directed 18 movies on Netflix. \n\n\n**Sheet 5. Top Genres** \n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2Fbc458d5885c3d7bcd3e690962c5cc2c3%2FTop%20Genres.png?generation=1661607262740686&alt=media)\n\nThis chart shows the genres with the highest numbers on Netflix. \n- We can see that Drama & International movies followed by Documentary have the highest number of contents on Netflix within the period.\n\n\n**Sheet 6. Top Ratings**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F181df392cb0006559cd9fb19a29cadef%2FRating.png?generation=1661607535247137&alt=media)\n \nRating is a system to rate motion picture's suitability for certain audiences based on its content. This chart shows the top ratings on Netflix\n-We can note that most contents on Netflix are rated TV-MA. TV-MA in the United States by the TV Parental Guidelines signifies content for mature audiences. \n\n\n**Sheet 7. Oldest Contents on Netflix by Content Release year**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F263493038e8dacd330c9e54aed2c467b%2FOldest%20shows%20on%20netflix.png?generation=1661607864455871&alt=media)\n\nThis table shows the 10 oldest movies and tv shows on Netflix\n- The oldest is as old as 1925\n\n**Sheet 8. Content Types over the Years**\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F520da629aceef21a7af890198897c58e%2FContent%20Type%20over%20the%20years.png?generation=1661608071825961&alt=media)\n\nThis line chart compares the Movie and Tv shows contents added to Netflix all through the years.\n- We can see that more movies have always been added. \n- In 2013, the number of contents added to Netflix for both were almost the same with Movies having 6 contents that year and Tv shows having 5.\n- It shows that in the first 5 years, only movies were added to Netflix. \n\n\n**Sheet 9. 
Release Years with Highest Contents**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F063881abc476d466ad983f96b7f23284%2FRelease_years%20with%20highest%20movie.png?generation=1661608527082875&alt=media)\n\nThis chart shows the Movies and Tv shows production year which has with highest contents on Netflix. We focus on the top 10 release year/production year. \n-We can see that from 2012 to 2018, Netflix added most recent contents, they made sure most recent contents per release year are higher than the older release year contents. Then in 2019, it started dropping, this may be due to the Covid-19, but further analysis may be needed to determine this. \n\n And with this, I have come to the end of this exercise. As I said this is just an exercise to test my skills as I look forward to be better. Thanks for following through. Cheers!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2437124, "CreatorUserId": 10322697, "OwnerUserId": 10322697.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4123716.0, "CurrentDatasourceVersionId": 4180064.0, "ForumId": 2464656, "Type": 2, "CreationDate": "08/26/2022 09:25:43", "LastActivityDate": "08/26/2022", "TotalViews": 96354, "TotalDownloads": 16114, "TotalVotes": 270, "TotalKernels": 23}]
[{"Id": 10322697, "UserName": "ariyoomotade", "DisplayName": "Abdulrasaq Ariyo", "RegisterDate": "04/22/2022", "PerformanceTier": 0}]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
    "/kaggle/input/netflix-data-cleaning-analysis-and-visualization/netflix1.csv"
)
df.head()
df.info()
df.shape
df.describe(include="all")
df.nunique(axis=0)
type_of_show = df.groupby("type")["show_id"].count()
label = ["Movies", "TV Shows"]
plt.pie(type_of_show, labels=label, autopct="%1.2f%%")
plt.title("Proportion of show types on Netflix")
plt.show()
per_country = df.groupby("country")["show_id"].count()
per_country.sort_values(ascending=False).head(10).plot(kind="bar")
plt.title("Top ten countries with the most shows on Netflix")
plt.xlabel("Country")
plt.ylabel("No of Shows")
plt.show()
per_year = df.groupby("release_year")["show_id"].count()
per_year.sort_values(ascending=False).head(10).plot(kind="bar")
plt.title("Release years with the most shows on Netflix")
plt.xlabel("Release Year")
plt.ylabel("No of Shows")
plt.show()
listed_in = df.groupby("listed_in")["show_id"].count()
listed_in.sort_values(ascending=False).head(10).plot(kind="barh")
plt.title("Number of shows listed in different genres")
plt.ylabel("Listed In")
plt.xlabel("No of Shows")
plt.show()
df.groupby("director")["show_id"].count().sort_values(ascending=False).head(10)
genre_in_usa = (
    df[df["country"] == "United States"].groupby("listed_in")["show_id"].count()
)
genre_in_usa.sort_values(ascending=False).head(10).plot(kind="barh")
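# The dataset description above builds a "contents added per year" view in
# Tableau; a rough pandas equivalent, assuming the cleaned file keeps a
# date_added column in a format pandas can parse:
year_added = pd.to_datetime(df["date_added"], errors="coerce").dt.year
year_added.value_counts().sort_index().plot(kind="line")
plt.title("Contents added to Netflix per year")
plt.show()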
false
1
670
1
4,901
670
129083186
# # ## **Disclaimer**
# This info can be found in the Introduction to Machine Learning with Python book; it is an awesome book I highly recommend. You can find it here:
# https://www.oreilly.com/library/view/introduction-to-machine/9781449369880/
# ## Linear Models
# #### Linear models are a class of models that are widely used in practice and have been studied extensively in the last few decades, with roots going back over a hundred years. Linear models make a prediction using a linear function of the input features
# ### Linear models for regression
# #### For regression, the general prediction formula for a linear model looks as follows: ŷ = w[0] * x[0] + w[1] * x[1] + ... + w[p] * x[p] + b
# #### Here, x[0] to x[p] denote the features (in this example, the number of features is p + 1) of a single data point, w and b are parameters of the model that are learned, and ŷ is the prediction the model makes.
# #### w[0] is the slope and b is the y-axis offset. For more features, w contains the slopes along each feature axis. Alternatively, you can think of the predicted response as being a weighted sum of the input features, with weights (which can be negative) given by the entries of w
import matplotlib.pyplot as plt
import mglearn

mglearn.plots.plot_linear_regression_wave()
# #### We added a coordinate cross into the plot to make it easier to understand the line. Looking at w[0] we see that the slope should be around 0.4, which we can confirm visually in the plot. The intercept is where the prediction line should cross the y-axis: this is slightly below zero, which you can also confirm in the image.
# #### Linear models for regression can be characterized as regression models for which the prediction is a line for a single feature, a plane when using two features, or a hyperplane in higher dimensions (that is, when using more features)
# #### If you compare the predictions made by the straight line with those made by the KNeighborsRegressor, using a straight line to make predictions seems very restrictive. It looks like all the fine details of the data are lost
# #### For datasets with many features, linear models can be very powerful. In particular, if you have more features than training data points, any target y can be perfectly modeled (on the training set) as a linear function
# #### There are many different linear models for regression. The difference between these models lies in how the model parameters w and b are learned from the training data, and how model complexity can be controlled
# ### Linear regression (aka ordinary least squares) "OLS"
# #### Linear regression, or ordinary least squares (OLS), is the simplest and most classic linear method for regression. Linear regression finds the parameters w and b that minimize the mean squared error between predictions and the true regression targets, y, on the training set. The mean squared error is the sum of the squared differences between the predictions and the true values. Linear regression has no parameters, which is a benefit, but it also has no way to control model complexity.
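# #### As an illustration of "minimizing the mean squared error": after appending a column of ones to X for the intercept, the OLS solution has the closed form w = (XᵀX)⁻¹Xᵀy. A small didactic sketch on synthetic data (not from the book; scikit-learn uses a more robust solver internally)
import numpy as np

rng = np.random.RandomState(0)
X_demo = rng.normal(size=(100, 3))
y_demo = X_demo @ np.array([1.5, -2.0, 0.5]) + 0.1 * rng.normal(size=100)
X_aug = np.hstack([X_demo, np.ones((100, 1))])  # last column carries the intercept b
w_aug = np.linalg.solve(X_aug.T @ X_aug, X_aug.T @ y_demo)
print("w: {} b: {}".format(w_aug[:-1], w_aug[-1]))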
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

X, y = mglearn.datasets.make_wave(n_samples=600)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
lr = LinearRegression().fit(X_train, y_train)
print("lr.coef_: {}".format(lr.coef_))
print("lr.intercept_: {}".format(lr.intercept_))
# #### This code produces the model you can see above
# #### The "slope" parameters (w), also called weights or coefficients, are stored in the coef_ attribute, while the offset or intercept (b) is stored in the intercept_ attribute
# #### The intercept_ attribute is always a single float number, while the coef_ attribute is a NumPy array with one entry per input feature. As we only have a single input feature in the wave dataset, lr.coef_ only has a single entry
print("Training set score: {:.2f}".format(lr.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lr.score(X_test, y_test)))
# #### An R2 of around 0.66 is not very good, but we can see that the scores on the training and test sets are very close together. This means we are likely underfitting, not overfitting. For this one-dimensional dataset, there is little danger of overfitting, as the model is very simple (or restricted). However, with higher-dimensional datasets (meaning datasets with a large number of features), linear models become more powerful, and there is a higher chance of overfitting
# #### Let's take a look at how LinearRegression performs on a more complex dataset
X, y = mglearn.datasets.load_extended_boston()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
lr = LinearRegression().fit(X_train, y_train)
print("Training set score: {:.2f}".format(lr.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lr.score(X_test, y_test)))
# #### This discrepancy between performance on the training set and the test set is a clear sign of overfitting, and therefore we should try to find a model that allows us to control complexity. One of the most commonly used alternatives to standard linear regression is ridge regression
# ### Ridge regression
# #### Ridge regression is also a linear model for regression, so the formula it uses to make predictions is the same one used for ordinary least squares. In ridge regression, though, the coefficients (w) are chosen not only so that they predict well on the training data, but also to fit an additional constraint. We also want the magnitude of coefficients to be as small as possible; in other words, all entries of w should be close to zero. Intuitively, this means each feature should have as little effect on the outcome as possible (which translates to having a small slope), while still predicting well. This constraint is an example of what is called regularization. Regularization means explicitly restricting a model to avoid overfitting. The particular kind used by ridge regression is known as L2 regularization
from sklearn.linear_model import Ridge

ridge = Ridge().fit(X_train, y_train)
print("Training set score: {:.2f}".format(ridge.score(X_train, y_train)))
print("Test set score: {:.2f}".format(ridge.score(X_test, y_test)))
# #### As you can see, the training set score of Ridge is lower than for LinearRegression, while the test set score is higher. This is consistent with our expectation. With linear regression, we were overfitting our data. Ridge is a more restricted model, so we are less likely to overfit.
A less complex model means worse performance on the training set, but better generalization. As we are only interested in generalization performance, we should choose the Ridge model over the LinearRegression model.
# #### The Ridge model makes a trade-off between the simplicity of the model (near-zero coefficients) and its performance on the training set. How much importance the model places on simplicity versus training set performance can be specified by the user, using the alpha parameter
# #### The default parameter is alpha=1.0. Increasing alpha forces coefficients to move more toward zero, which decreases training set performance but might help generalization.
ridge10 = Ridge(alpha=10).fit(X_train, y_train)
print("Training set score: {:.2f}".format(ridge10.score(X_train, y_train)))
print("Test set score: {:.2f}".format(ridge10.score(X_test, y_test)))
# #### Decreasing alpha allows the coefficients to be less restricted
ridge01 = Ridge(alpha=0.1).fit(X_train, y_train)
print("Training set score: {:.2f}".format(ridge01.score(X_train, y_train)))
print("Test set score: {:.2f}".format(ridge01.score(X_test, y_test)))
# #### We can also get a more qualitative insight into how the alpha parameter changes the model by inspecting the coef_ attribute of models with different values of alpha. A higher alpha means a more restricted model, so we expect the entries of coef_ to have smaller magnitude for a high value of alpha than for a low value of alpha.
plt.plot(ridge.coef_, "s", label="Ridge alpha=1")
plt.plot(ridge10.coef_, "^", label="Ridge alpha=10")
plt.plot(ridge01.coef_, "v", label="Ridge alpha=0.1")
plt.plot(lr.coef_, "o", label="LinearRegression")
plt.xlabel("Coefficient index")
plt.ylabel("Coefficient magnitude")
plt.hlines(0, 0, len(lr.coef_))
plt.ylim(-25, 25)
plt.legend()
# #### Here, the x-axis enumerates the entries of coef_: x=0 shows the coefficient associated with the first feature, x=1 the coefficient associated with the second feature, and so on up to x=100. The y-axis shows the numeric values of the corresponding coefficients. The main takeaway here is that for alpha=10, the coefficients are mostly between around –3 and 3. The coefficients for the Ridge model with alpha=1 are somewhat larger. The dots corresponding to alpha=0.1 have larger magnitude still, and many of the dots corresponding to linear regression without any regularization (which would be alpha=0) are so large they are outside of the chart
# #### Another way to understand the influence of regularization is to fix a value of alpha but vary the amount of training data available. We subsampled the Boston Housing dataset and evaluated LinearRegression and Ridge(alpha=1) on subsets of increasing size (plots that show model performance as a function of dataset size are called learning curves)
mglearn.plots.plot_ridge_n_samples()
# #### As one would expect, the training score is higher than the test score for all dataset sizes, for both ridge and linear regression. Because ridge is regularized, the training score of ridge is lower than the training score for linear regression across the board. However, the test score for ridge is better, particularly for small subsets of the data. For less than 400 data points, linear regression is not able to learn anything. As more and more data becomes available to the model, both models improve, and linear regression catches up with ridge in the end.
The lesson here is that with enough training data, regularization becomes less important, and given enough data, ridge and linear regression will have the same performance (the fact that this happens here when using the full dataset is just by chance). Another interesting aspect is the decrease in training performance for linear regression. If more data is added, it becomes harder for a model to overfit, or memorize the data.
# ### Lasso
# #### An alternative to Ridge for regularizing linear regression is Lasso. As with ridge regression, using the lasso also restricts coefficients to be close to zero, but in a slightly different way, called L1 regularization. The consequence of L1 regularization is that when using the lasso, some coefficients are exactly zero. This means some features are entirely ignored by the model. This can be seen as a form of automatic feature selection. Having some coefficients be exactly zero often makes a model easier to interpret, and can reveal the most important features of your model
import numpy as np
from sklearn.linear_model import Lasso

lasso = Lasso().fit(X_train, y_train)
print("Training set score: {:.2f}".format(lasso.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lasso.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(lasso.coef_ != 0)))
# #### As you can see, Lasso does quite badly, both on the training and the test set. This indicates that we are underfitting, and we find that it used only 4 of the 105 features. Similarly to Ridge, the Lasso also has a regularization parameter, alpha, that controls how strongly coefficients are pushed toward zero. In the previous example, we used the default of alpha=1.0. To reduce underfitting, let's try decreasing alpha. When we do this, we also need to increase the default setting of max_iter (the maximum number of iterations to run)
# we increase the default setting of "max_iter",
# otherwise the model would warn us that we should increase max_iter.
lasso001 = Lasso(alpha=0.01, max_iter=100000).fit(X_train, y_train)
print("Training set score: {:.2f}".format(lasso001.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lasso001.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(lasso001.coef_ != 0)))
# #### A lower alpha allowed us to fit a more complex model, which worked better on the training and test data. The performance is slightly better than using Ridge, and we are using only 33 of the 105 features. This makes this model potentially easier to understand.
# #### If we set alpha too low, however, we again remove the effect of regularization and end up overfitting, with a result similar to LinearRegression
lasso00001 = Lasso(alpha=0.0001, max_iter=100000).fit(X_train, y_train)
print("Training set score: {:.2f}".format(lasso00001.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lasso00001.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(lasso00001.coef_ != 0)))
# #### Again, we can plot the coefficients of the different models
plt.plot(lasso.coef_, "s", label="Lasso alpha=1")
plt.plot(lasso001.coef_, "^", label="Lasso alpha=0.01")
plt.plot(lasso00001.coef_, "v", label="Lasso alpha=0.0001")
plt.plot(ridge01.coef_, "o", label="Ridge alpha=0.1")
plt.legend(ncol=2, loc=(0, 1.05))
plt.ylim(-25, 25)
plt.xlabel("Coefficient index")
plt.ylabel("Coefficient magnitude")
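# #### The alpha values above were picked by hand; in practice one would tune them with cross-validation. A minimal sketch using scikit-learn's built-in cross-validated estimators (an extension of the book's example, not part of it)
from sklearn.linear_model import LassoCV, RidgeCV

ridge_cv = RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0]).fit(X_train, y_train)
lasso_cv = LassoCV(alphas=[0.0001, 0.001, 0.01, 0.1, 1.0], max_iter=100000).fit(
    X_train, y_train
)
print("Best ridge alpha: {}".format(ridge_cv.alpha_))
print("Best lasso alpha: {}".format(lasso_cv.alpha_))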
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/083/129083186.ipynb
null
null
[{"Id": 129083186, "ScriptId": 38373259, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6965494, "CreationDate": "05/10/2023 21:37:31", "VersionNumber": 1.0, "Title": "Analyzing Linear Regression", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 172.0, "LinesInsertedFromPrevious": 172.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
# # ## **Disclaimer**
# This info can be found in the Introduction to Machine Learning with Python book; it is an awesome book I highly recommend. You can find it here:
# https://www.oreilly.com/library/view/introduction-to-machine/9781449369880/
# ## Linear Models
# #### Linear models are a class of models that are widely used in practice and have been studied extensively in the last few decades, with roots going back over a hundred years. Linear models make a prediction using a linear function of the input features
# ### Linear models for regression
# #### For regression, the general prediction formula for a linear model looks as follows: ŷ = w[0] * x[0] + w[1] * x[1] + ... + w[p] * x[p] + b
# #### Here, x[0] to x[p] denote the features (in this example, the number of features is p + 1) of a single data point, w and b are parameters of the model that are learned, and ŷ is the prediction the model makes.
# #### w[0] is the slope and b is the y-axis offset. For more features, w contains the slopes along each feature axis. Alternatively, you can think of the predicted response as being a weighted sum of the input features, with weights (which can be negative) given by the entries of w
import matplotlib.pyplot as plt
import mglearn

mglearn.plots.plot_linear_regression_wave()
# #### We added a coordinate cross into the plot to make it easier to understand the line. Looking at w[0] we see that the slope should be around 0.4, which we can confirm visually in the plot. The intercept is where the prediction line should cross the y-axis: this is slightly below zero, which you can also confirm in the image.
# #### Linear models for regression can be characterized as regression models for which the prediction is a line for a single feature, a plane when using two features, or a hyperplane in higher dimensions (that is, when using more features)
# #### If you compare the predictions made by the straight line with those made by the KNeighborsRegressor, using a straight line to make predictions seems very restrictive. It looks like all the fine details of the data are lost
# #### For datasets with many features, linear models can be very powerful. In particular, if you have more features than training data points, any target y can be perfectly modeled (on the training set) as a linear function
# #### There are many different linear models for regression. The difference between these models lies in how the model parameters w and b are learned from the training data, and how model complexity can be controlled
# ### Linear regression (aka ordinary least squares) "OLS"
# #### Linear regression, or ordinary least squares (OLS), is the simplest and most classic linear method for regression. Linear regression finds the parameters w and b that minimize the mean squared error between predictions and the true regression targets, y, on the training set. The mean squared error is the sum of the squared differences between the predictions and the true values. Linear regression has no parameters, which is a benefit, but it also has no way to control model complexity.
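# #### As an illustration of "minimizing the mean squared error": after appending a column of ones to X for the intercept, the OLS solution has the closed form w = (XᵀX)⁻¹Xᵀy. A small didactic sketch on synthetic data (not from the book; scikit-learn uses a more robust solver internally)
import numpy as np

rng = np.random.RandomState(0)
X_demo = rng.normal(size=(100, 3))
y_demo = X_demo @ np.array([1.5, -2.0, 0.5]) + 0.1 * rng.normal(size=100)
X_aug = np.hstack([X_demo, np.ones((100, 1))])  # last column carries the intercept b
w_aug = np.linalg.solve(X_aug.T @ X_aug, X_aug.T @ y_demo)
print("w: {} b: {}".format(w_aug[:-1], w_aug[-1]))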
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

X, y = mglearn.datasets.make_wave(n_samples=600)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
lr = LinearRegression().fit(X_train, y_train)
print("lr.coef_: {}".format(lr.coef_))
print("lr.intercept_: {}".format(lr.intercept_))
# #### This code produces the model you can see above
# #### The "slope" parameters (w), also called weights or coefficients, are stored in the coef_ attribute, while the offset or intercept (b) is stored in the intercept_ attribute
# #### The intercept_ attribute is always a single float number, while the coef_ attribute is a NumPy array with one entry per input feature. As we only have a single input feature in the wave dataset, lr.coef_ only has a single entry
print("Training set score: {:.2f}".format(lr.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lr.score(X_test, y_test)))
# #### An R2 of around 0.66 is not very good, but we can see that the scores on the training and test sets are very close together. This means we are likely underfitting, not overfitting. For this one-dimensional dataset, there is little danger of overfitting, as the model is very simple (or restricted). However, with higher-dimensional datasets (meaning datasets with a large number of features), linear models become more powerful, and there is a higher chance of overfitting
# #### Let's take a look at how LinearRegression performs on a more complex dataset
X, y = mglearn.datasets.load_extended_boston()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
lr = LinearRegression().fit(X_train, y_train)
print("Training set score: {:.2f}".format(lr.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lr.score(X_test, y_test)))
# #### This discrepancy between performance on the training set and the test set is a clear sign of overfitting, and therefore we should try to find a model that allows us to control complexity. One of the most commonly used alternatives to standard linear regression is ridge regression
# ### Ridge regression
# #### Ridge regression is also a linear model for regression, so the formula it uses to make predictions is the same one used for ordinary least squares. In ridge regression, though, the coefficients (w) are chosen not only so that they predict well on the training data, but also to fit an additional constraint. We also want the magnitude of coefficients to be as small as possible; in other words, all entries of w should be close to zero. Intuitively, this means each feature should have as little effect on the outcome as possible (which translates to having a small slope), while still predicting well. This constraint is an example of what is called regularization. Regularization means explicitly restricting a model to avoid overfitting. The particular kind used by ridge regression is known as L2 regularization
from sklearn.linear_model import Ridge

ridge = Ridge().fit(X_train, y_train)
print("Training set score: {:.2f}".format(ridge.score(X_train, y_train)))
print("Test set score: {:.2f}".format(ridge.score(X_test, y_test)))
# #### As you can see, the training set score of Ridge is lower than for LinearRegression, while the test set score is higher. This is consistent with our expectation. With linear regression, we were overfitting our data. Ridge is a more restricted model, so we are less likely to overfit.
# #### The Ridge model makes a trade-off between the simplicity of the model (near-zero coefficients) and its performance on the training set. How much importance the model places on simplicity versus training set performance can be specified by the user, using the alpha parameter.
# #### The default is alpha=1.0. Increasing alpha forces coefficients to move more toward zero, which decreases training set performance but might help generalization.
ridge10 = Ridge(alpha=10).fit(X_train, y_train)
print("Training set score: {:.2f}".format(ridge10.score(X_train, y_train)))
print("Test set score: {:.2f}".format(ridge10.score(X_test, y_test)))
# #### Decreasing alpha allows the coefficients to be less restricted.
ridge01 = Ridge(alpha=0.1).fit(X_train, y_train)
print("Training set score: {:.2f}".format(ridge01.score(X_train, y_train)))
print("Test set score: {:.2f}".format(ridge01.score(X_test, y_test)))
# #### We can also get a more qualitative insight into how the alpha parameter changes the model by inspecting the coef_ attribute of models with different values of alpha. A higher alpha means a more restricted model, so we expect the entries of coef_ to have smaller magnitude for a high value of alpha than for a low value of alpha.
plt.plot(ridge.coef_, "s", label="Ridge alpha=1")
plt.plot(ridge10.coef_, "^", label="Ridge alpha=10")
plt.plot(ridge01.coef_, "v", label="Ridge alpha=0.1")
plt.plot(lr.coef_, "o", label="LinearRegression")
plt.xlabel("Coefficient index")
plt.ylabel("Coefficient magnitude")
plt.hlines(0, 0, len(lr.coef_))
plt.ylim(-25, 25)
plt.legend()
# #### Here, the x-axis enumerates the entries of coef_: x=0 shows the coefficient associated with the first feature, x=1 the coefficient associated with the second feature, and so on up to x=100. The y-axis shows the numeric values of the corresponding coefficients. The main takeaway here is that for alpha=10, the coefficients are mostly between around -3 and 3. The coefficients for the Ridge model with alpha=1 are somewhat larger. The dots corresponding to alpha=0.1 have larger magnitude still, and many of the dots corresponding to linear regression without any regularization (which would be alpha=0) are so large they are outside of the chart.
# #### Another way to understand the influence of regularization is to fix a value of alpha but vary the amount of training data available. We subsampled the Boston Housing dataset and evaluated LinearRegression and Ridge(alpha=1) on subsets of increasing size (plots that show model performance as a function of dataset size are called learning curves).
mglearn.plots.plot_ridge_n_samples()
# #### As one would expect, the training score is higher than the test score for all dataset sizes, for both ridge and linear regression. Because ridge is regularized, the training score of ridge is lower than the training score for linear regression across the board. However, the test score for ridge is better, particularly for small subsets of the data. For less than 400 data points, linear regression is not able to learn anything. As more and more data becomes available to the model, both models improve, and linear regression catches up with ridge in the end.
# #### The lesson here is that with enough training data, regularization becomes less important, and given enough data, ridge and linear regression will have the same performance (the fact that this happens here when using the full dataset is just by chance). Another interesting aspect is the decrease in training performance for linear regression: if more data is added, it becomes harder for a model to overfit, or memorize, the data.
# ### Lasso
# #### An alternative to Ridge for regularizing linear regression is Lasso. As with ridge regression, using the lasso also restricts coefficients to be close to zero, but in a slightly different way, called L1 regularization. The consequence of L1 regularization is that when using the lasso, some coefficients are exactly zero. This means some features are entirely ignored by the model. This can be seen as a form of automatic feature selection. Having some coefficients be exactly zero often makes a model easier to interpret, and can reveal the most important features of your model.
from sklearn.linear_model import Lasso

lasso = Lasso().fit(X_train, y_train)
print("Training set score: {:.2f}".format(lasso.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lasso.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(lasso.coef_ != 0)))
# #### As you can see, Lasso does quite badly, both on the training and the test set. This indicates that we are underfitting, and we find that it used only 4 of the 105 features. Similarly to Ridge, the Lasso also has a regularization parameter, alpha, that controls how strongly coefficients are pushed toward zero. In the previous example, we used the default of alpha=1.0. To reduce underfitting, let's try decreasing alpha. When we do this, we also need to increase the default setting of max_iter (the maximum number of iterations to run).
# we increase the default setting of "max_iter",
# otherwise the model would warn us that we should increase max_iter.
lasso001 = Lasso(alpha=0.01, max_iter=100000).fit(X_train, y_train)
print("Training set score: {:.2f}".format(lasso001.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lasso001.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(lasso001.coef_ != 0)))
# #### A lower alpha allowed us to fit a more complex model, which worked better on the training and test data. The performance is slightly better than using Ridge, and we are using only 33 of the 105 features. This makes this model potentially easier to understand.
# #### If we set alpha too low, however, we again remove the effect of regularization and end up overfitting, with a result similar to LinearRegression.
lasso00001 = Lasso(alpha=0.0001, max_iter=100000).fit(X_train, y_train)
print("Training set score: {:.2f}".format(lasso00001.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lasso00001.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(lasso00001.coef_ != 0)))
# #### Again, we can plot the coefficients of the different models.
plt.plot(lasso.coef_, "s", label="Lasso alpha=1")
plt.plot(lasso001.coef_, "^", label="Lasso alpha=0.01")
plt.plot(lasso00001.coef_, "v", label="Lasso alpha=0.0001")
plt.plot(ridge01.coef_, "o", label="Ridge alpha=0.1")
plt.legend(ncol=2, loc=(0, 1.05))
plt.ylim(-25, 25)
plt.xlabel("Coefficient index")
plt.ylabel("Coefficient magnitude")
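# #### In practice, a good alpha is usually found with cross-validation rather than set by hand. A minimal added sketch (not from the book) using scikit-learn's LassoCV, which searches a grid of alphas internally on the training data:
from sklearn.linear_model import LassoCV

lasso_cv = LassoCV(cv=5, max_iter=100000, random_state=0).fit(X_train, y_train)
print("Best alpha: {:.5f}".format(lasso_cv.alpha_))
print("Test set score: {:.2f}".format(lasso_cv.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(lasso_cv.coef_ != 0)))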
false
0
3,465
2
3,465
3,465
129089393
# Stage 1: Data Preprocessing
import pandas as pd
from statsmodels.tsa.stattools import adfuller
from sklearn.preprocessing import MinMaxScaler

# Define the file path to the dataset
file_path = "/kaggle/input/chargingbehavior/ChargePoint Data CY20Q4.csv"

# Load the dataset into a Pandas DataFrame
df = pd.read_csv(file_path, low_memory=False)

# Convert relevant columns to appropriate datatypes
df["Transaction Date (Pacific Time)"] = pd.to_datetime(
    df["Transaction Date (Pacific Time)"], yearfirst=True, errors="coerce"
)
df["Start Date"] = pd.to_datetime(df["Start Date"], yearfirst=True, errors="coerce")
df["End Date"] = pd.to_datetime(df["End Date"], yearfirst=True, errors="coerce")

# Handle missing values
df.dropna(
    subset=[
        "Transaction Date (Pacific Time)",
        "Charging Time (hh:mm:ss)",
        "Energy (kWh)",
    ],
    inplace=True,
)

# Additional Data Preprocessing Steps
# Remove the unnecessary 'Start Time Zone' and 'End Time Zone' columns
df.drop("Start Time Zone", axis=1, inplace=True)
df.drop("End Time Zone", axis=1, inplace=True)


# Handle outliers by clipping each column to its 1.5 * IQR fences
def handle_outliers(df, columns):
    for column in columns:
        q1 = df[column].quantile(0.25)
        q3 = df[column].quantile(0.75)
        iqr = q3 - q1
        lower_bound = q1 - 1.5 * iqr
        upper_bound = q3 + 1.5 * iqr
        df[column] = df[column].clip(lower=lower_bound, upper=upper_bound)
    return df


# Specify columns to handle outliers
outlier_columns = ["Energy (kWh)", "GHG Savings (kg)", "Gasoline Savings (gallons)"]

# Apply outlier handling
df = handle_outliers(df, outlier_columns)

# Convert 'Energy (kWh)' to a stationary series if the ADF test cannot reject a unit root
adf_result = adfuller(df["Energy (kWh)"])
p_value = adf_result[1]
if p_value > 0.05:
    df["Energy (kWh)"] = df["Energy (kWh)"].diff()
    # differencing puts a NaN in the first row; drop it so the scaling and the
    # ACF/PACF plots below operate on a clean series
    df.dropna(subset=["Energy (kWh)"], inplace=True)

# Scaling
scaler = MinMaxScaler()
columns_to_scale = ["Energy (kWh)", "GHG Savings (kg)", "Gasoline Savings (gallons)"]
df[columns_to_scale] = scaler.fit_transform(df[columns_to_scale])

# Confirm the preprocessing is complete
preprocessed = True
df.columns

# Generate data types of all columns
data_types = df.dtypes

# Print the data types
print(data_types)

# Stage 2: Model Identification
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf

# Plot the ACF and PACF of the preprocessed 'Energy (kWh)' series
fig, ax = plt.subplots(figsize=(12, 6))
plot_acf(df["Energy (kWh)"], lags=50, ax=ax)
plt.xlabel("Lag")
plt.ylabel("Autocorrelation")
plt.title("ACF Plot")
plt.show()

fig, ax = plt.subplots(figsize=(12, 6))
plot_pacf(df["Energy (kWh)"], lags=50, ax=ax, method="ywm")
plt.xlabel("Lag")
plt.ylabel("Partial Autocorrelation")
plt.title("PACF Plot")
plt.show()
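# A minimal added check (an assumption, not in the original notebook): re-run the ADF
# test on the fully preprocessed series to confirm that the data fed to the ACF/PACF
# plots above is in fact stationary.
adf_after = adfuller(df["Energy (kWh)"].dropna())
print("ADF statistic after preprocessing: {:.3f}".format(adf_after[0]))
print("p-value after preprocessing: {:.4f}".format(adf_after[1]))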
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/089/129089393.ipynb
null
null
[{"Id": 129089393, "ScriptId": 38373994, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9516386, "CreationDate": "05/10/2023 23:38:27", "VersionNumber": 1.0, "Title": "Charging Behavior", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 90.0, "LinesInsertedFromPrevious": 90.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
905
0
905
905
129089294
<jupyter_start><jupyter_text>Anemia Disease

Citation Request: See the articles for more detailed information on the data.

Kilicarslan, S., Celik, M., & Sahin, Ş. (2021). Hybrid models based on genetic algorithm and deep learning algorithms for nutritional Anemia disease classification. Biomedical Signal Processing and Control, 63, 102231.

About Dataset

Data

The anemia dataset used in this study was obtained from the Faculty of Medicine, Tokat Gaziosmanpaşa University, Turkey. The data contains the complete blood count test results of 15,300 patients in the 5-year interval between 2013 and 2018. The data of pregnant women, children, and patients with cancer were excluded from the study. The noise in the dataset was eliminated, and the parameters that were considered insignificant in the diagnosis of anemia were excluded from the dataset with the help of the experts. It is observed that, in the dataset, some of the records have missing parameter values and have values outside the reference ranges of the parameters, which are marked by specialist doctors as noise in our study. Thus, records that have missing data and parameter values outside the reference ranges were removed from the dataset. In the study, the Pearson correlation method was used to understand whether there is any relationship between the parameters. It is observed that the relationship between the parameters in the dataset is generally a weak relationship, below p < 0.4 [59]. For this reason, none of the parameters were excluded from the dataset. Twenty-four features (Table 1) and 5 classes in the dataset were used in the study (Table 2). Since the difference between the parameters in the dataset was very high, a linear transformation was performed on the data with min-max normalization [30]. This dataset consists of data from 15,300 patients, of which 10,379 were female and 4921 were male. The dataset consists of 1019 (7%) patients with HGB-anemia, 4182 (27%) patients with iron deficiency, 199 (1%) patients with B12 deficiency, 153 (1%) patients with folate deficiency, and 9747 (64%) patients who had no anemia (Table 2). The transferrin saturation in the dataset was obtained as the "SDTSD" feature, using Eq. (1), which was developed with the help of a specialist physician. Saturation is the ratio of serum iron to total serum iron. In the equation, SD represents Serum Iron and TSD represents Total Serum Iron. (Eq. (1) is given as an image in the original description and is not included here.)

Table 1. Anemia Disease Dataset Attributes and Their Descriptions (table image not included)

In the study, GA-SAE and GA-CNN models were proposed for the classification of HGB-anemia, iron deficiency anemia, B12 deficiency anemia, folate deficiency anemia, and patients without anemia (Table 2). The hyperparameters of the proposed deep learning algorithms of SAE and CNN are determined by using the global and local search capabilities of the GA. (Table 2 image not included)

Citation Request: See the articles for more detailed information on the data.

Kilicarslan, S., Celik, M., & Sahin, Ş. (2021). Hybrid models based on genetic algorithm and deep learning algorithms for nutritional Anemia disease classification. Biomedical Signal Processing and Control, 63, 102231.
Kaggle dataset identifier: anemia-disease <jupyter_script>import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats

anemifirst = pd.read_csv("anemi.csv")
anemifirst

anemifirst.rename(columns={"All_Class": "TargetClass"}, inplace=True)
anemifirst.drop(
    [
        "HGB_Anemia_Class",
        "Iron_anemia_Class",
        "Folate_anemia_class",
        "B12_Anemia_class",
    ],
    axis=1,
    inplace=True,
)
# swap the TSD and SDTSD column names and fix the FERRITTE typo in one atomic rename
anemifirst.rename(
    columns={"TSD": "SDTSD", "FERRITTE": "FERRITIN", "SDTSD": "TSD"}, inplace=True
)

# z_scores = stats.zscore(anemifirst)
# abs_z_scores = np.abs(z_scores)
# filtered_entries = (abs_z_scores < 3.45).all(axis=1)
# anemi = anemifirst[filtered_entries]
# anemi.reset_index(inplace=True, drop=True)
anemi = anemifirst

dataset_reduced = anemi["FERRITIN"] > 0
dataset_reduced.value_counts()
anemi.info()
anemi.describe()
anemi.sort_values(["FERRITIN"], ascending=False).groupby("FERRITIN").head(10)

# separate the target from the features; drop it from X so it cannot leak into training
y = anemi["TargetClass"]
X = anemi.drop(["TargetClass"], axis=1)
anemi

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, shuffle=True
)

from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    precision_score,
    recall_score,
)
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.ensemble import ExtraTreesClassifier


def give_scores(y_test, y_pred):
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred, average="weighted")
    recall = recall_score(y_test, y_pred, average="weighted")
    return accuracy, precision, recall


from sklearn.model_selection import cross_val_score


def model_accuracy(model, X_train=X_train, y_train=y_train):
    accuracies = cross_val_score(estimator=model, X=X_train, y=y_train, cv=10)
    print("Accuracy: {:.2f} %".format(accuracies.mean() * 100))
    print("Standard Deviation: {:.2f} %".format(accuracies.std() * 100))


svc = SVC(probability=True)
knc = KNeighborsClassifier(n_neighbors=5, metric="euclidean")
gnb = GaussianNB()
dtc = DecisionTreeClassifier(max_depth=4)
lrc = LogisticRegression(solver="liblinear", penalty="l1")
rfc = RandomForestClassifier(n_estimators=50, random_state=2)
etc = ExtraTreesClassifier(n_estimators=50, random_state=2)

# ## SVC
svc.fit(X_train, y_train)
y_pred_svc = svc.predict(X_test)
confusion_matrix(y_test, y_pred_svc)
give_scores(y_test, y_pred_svc)

from sklearn.metrics import mean_absolute_error, mean_squared_error

print("\nMAE: {}".format(mean_absolute_error(y_pred_svc, y_test)))
print("MSE: {}".format(mean_squared_error(y_pred_svc, y_test)))
print("RMSE: {}".format(np.sqrt(mean_squared_error(y_pred_svc, y_test))))

# ## KNC
knc.fit(X_train, y_train)
y_pred_knc = knc.predict(X_test)
confusion_matrix(y_test, y_pred_knc)
give_scores(y_test, y_pred_knc)
print("\nMAE: {}".format(mean_absolute_error(y_pred_knc, y_test)))
print("MSE: {}".format(mean_squared_error(y_pred_knc, y_test)))
print("RMSE: {}".format(np.sqrt(mean_squared_error(y_pred_knc, y_test))))

# ## GNB
gnb.fit(X_train, y_train)
y_pred_gnb = gnb.predict(X_test)
confusion_matrix(y_test, y_pred_gnb)
give_scores(y_test, y_pred_gnb)

from sklearn.metrics import mean_absolute_error, mean_squared_error

print("\nMAE: {}".format(mean_absolute_error(y_pred_gnb, y_test)))
print("MSE: {}".format(mean_squared_error(y_pred_gnb, y_test)))
print("RMSE: {}".format(np.sqrt(mean_squared_error(y_pred_gnb, y_test))))

# ## DTC
dtc.fit(X_train, y_train)
y_pred_dtc = dtc.predict(X_test)
confusion_matrix(y_test, y_pred_dtc)
give_scores(y_test, y_pred_dtc)
print("\nMAE: {}".format(mean_absolute_error(y_pred_dtc, y_test)))
print("MSE: {}".format(mean_squared_error(y_pred_dtc, y_test)))
print("RMSE: {}".format(np.sqrt(mean_squared_error(y_pred_dtc, y_test))))

# ## LRC
lrc.fit(X_train, y_train)
y_pred_lrc = lrc.predict(X_test)
confusion_matrix(y_test, y_pred_lrc)
give_scores(y_test, y_pred_lrc)
print("\nMAE: {}".format(mean_absolute_error(y_pred_lrc, y_test)))
print("MSE: {}".format(mean_squared_error(y_pred_lrc, y_test)))
print("RMSE: {}".format(np.sqrt(mean_squared_error(y_pred_lrc, y_test))))

# ## RFC
rfc.fit(X_train, y_train)
y_pred_rfc = rfc.predict(X_test)
confusion_matrix(y_test, y_pred_rfc)
give_scores(y_test, y_pred_rfc)
print("\nMAE: {}".format(mean_absolute_error(y_pred_rfc, y_test)))
print("MSE: {}".format(mean_squared_error(y_pred_rfc, y_test)))
print("RMSE: {}".format(np.sqrt(mean_squared_error(y_pred_rfc, y_test))))

# ## ETC
etc.fit(X_train, y_train)
y_pred_etc = etc.predict(X_test)
confusion_matrix(y_test, y_pred_etc)
give_scores(y_test, y_pred_etc)
print("\nMAE: {}".format(mean_absolute_error(y_pred_etc, y_test)))
print("MSE: {}".format(mean_squared_error(y_pred_etc, y_test)))
print("RMSE: {}".format(np.sqrt(mean_squared_error(y_pred_etc, y_test))))

# ## VC
vc = VotingClassifier([("svc", SVC()), ("knc", KNeighborsClassifier())], voting="hard")
vc.fit(X_train, y_train)
y_pred_vc = vc.predict(X_test)
confusion_matrix(y_test, y_pred_vc)
give_scores(y_test, y_pred_vc)
print("\nMAE: {}".format(mean_absolute_error(y_pred_vc, y_test)))
print("MSE: {}".format(mean_squared_error(y_pred_vc, y_test)))
print("RMSE: {}".format(np.sqrt(mean_squared_error(y_pred_vc, y_test))))

from sklearn.preprocessing import LabelBinarizer

label_binarizer = LabelBinarizer().fit(y_train)
y_onehot_test = label_binarizer.transform(y_test)
y_onehot_test.shape  # (n_samples, n_classes)
label_binarizer.transform([0])

y_score_svc = svc.fit(X_train, y_train).predict_proba(X_test)
y_score_knc = knc.fit(X_train, y_train).predict_proba(X_test)
y_score_gnb = gnb.fit(X_train, y_train).predict_proba(X_test)
y_score_dtc = dtc.fit(X_train, y_train).predict_proba(X_test)
y_score_lrc = lrc.fit(X_train, y_train).predict_proba(X_test)
y_score_rfc = rfc.fit(X_train, y_train).predict_proba(X_test)
y_score_etc = etc.fit(X_train, y_train).predict_proba(X_test)

y_score_all = [
    y_score_svc,
    y_score_knc,
    y_score_gnb,
    y_score_dtc,
    y_score_lrc,
    y_score_rfc,
    y_score_etc,
]
title_names = [
    "One-vs-Rest ROC curves: SVC",
    "One-vs-Rest ROC curves: KNC",
    "One-vs-Rest ROC curves: GNB",
    "One-vs-Rest ROC curves: DTC",
    "One-vs-Rest ROC curves: LRC",
    "One-vs-Rest ROC curves: RFC",
    "One-vs-Rest ROC curves: ETC",
]
anemia_classes = [
    "nicht anämisch",
    "HGB Anämie",
    "Eisenmangelanämie",
    "Folatemangelanämie",
    "B12-Mangelanämie",
]
colors = ["green", "pink", "blue", "darkorange", "lightblue", "purple", "yellow"]

import matplotlib.pyplot as plt
from sklearn.metrics import RocCurveDisplay

for j in range(0, 7):
    # draw the five one-vs-rest curves for model j on one shared axis, so the
    # diagonal, title, and legend below apply to all curves, not just the last one
    fig, ax = plt.subplots()
    for i in range(0, 5):
        RocCurveDisplay.from_predictions(
            y_onehot_test[:, i],
            y_score_all[j][:, i],
            name=f"{anemia_classes[i]} gegen den Rest",
            color=colors[i],  # one color per class curve
            ax=ax,
        )
    plt.plot([0, 1], [0, 1], "k--", label="Zufallswahrscheinlichkeit (AUC = 0.5)")
    plt.axis("square")
    plt.xlabel("Falsch Positiv Rate", fontsize="15")
    plt.ylabel("Richtig Positive Rate", fontsize="15")
    plt.rc("xtick", labelsize="15")
    plt.rc("ytick", labelsize="15")
    plt.title(title_names[j], color="darkred")
    plt.legend(loc=4, fontsize="9.5")
    plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/089/129089294.ipynb
anemia-disease
serhathoca
[{"Id": 129089294, "ScriptId": 36082536, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8579837, "CreationDate": "05/10/2023 23:36:30", "VersionNumber": 1.0, "Title": "Anemia Disease Dataset", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 230.0, "LinesInsertedFromPrevious": 230.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": 230.0, "LinesDeletedFromFork": 23.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 0.0, "TotalVotes": 0}]
[{"Id": 184833871, "KernelVersionId": 129089294, "SourceDatasetVersionId": 4342817}]
[{"Id": 4342817, "DatasetId": 2556689, "DatasourceVersionId": 4401254, "CreatorUserId": 2694551, "LicenseName": "Database: Open Database, Contents: \u00a9 Original Authors", "CreationDate": "10/17/2022 19:13:53", "VersionNumber": 2.0, "Title": "Anemia Disease", "Slug": "anemia-disease", "Subtitle": NaN, "Description": "Citation Request: See the articles for more detailed information on the data.\n\nKilicarslan, S., Celik, M., & Sahin, \u015e. (2021). Hybrid models based on genetic algorithm and deep learning algorithms for nutritional Anemia disease classification. Biomedical Signal Processing and Control, 63, 102231.\n\nAbout Dataset\n\nData\n\nThe anemia dataset used in this study were obtained from the Faculty of Medicine, Tokat Gaziosmanpa\u015fa University, Turkey. The data contains the complete blood count test results of 15,300 patients in the 5-year interval between 2013 and 2018. The dataset of pregnant women, children, and patients with cancer were excluded from the study. The noise in the dataset was eliminated and the parameters, which were considered insignificant in the diagnosis of anemia, were excluded from the dataset with the help of the experts. It is observed that, in the dataset, some of the records have missing parameter values and have values outside the reference range of the parameters which are marked by specialist doctors as noise in our study. Thus, records that have missing data and parameter values outside the reference ranges were removed from the dataset. In the study, Pearson correlation method was used to understand whether there is any relationship between the parameters. It is observed that the relationship between the parameters in the dataset is generally a weak relationship which is below p\u202f&lt;\u202f0.4 [59]. Because of this reason none of the parameters excluded from the dataset. Twenty-four features (Table 1) and 5 classes in the dataset were used in the study (Table 2). Since the difference between the parameters in the dataset was very high, a linear transformation was performed on the data with min-max normalization [30]. This dataset consists of data from 15,300 patients, of which 10,379 were female and 4921 were male. The dataset consists of 1019 (7%) patients with HGB-anemia, 4182 (27%) patients with iron deficiency, 199 (1%) patients with B12 deficiency, 153 (1%) patients with folate deficiency, and 9747 (64%) patients who had no anemia (Table 2). The transferring saturation in the dataset was obtained by the \"SDTSD\" feature, using the Eq. (1), which was developed with the help of a specialist physician. Saturation is the ratio of serum iron to total serum iron. In the Equation SD represents Serum Iron and TSD represents Total Serum Iron. (1)\n\nimage.png\n\nTable 1. Anemia Disease Dataset Attributes and Their Descriptions\n\nimage.png\n\nIn the study, GA-SAE and GA-CNN models were proposed for the classification of HBG-anemia, iron deficiency anemia, B12 deficiency anemia, folate deficiency anemia, and patients without anemia (Table 2). The hyperparameters of the proposed deep leaning algorithms of SAE and CNN are determined by using the global and local search capabilities of the GA.\n\nimage.png\n\nCitation Request: See the articles for more detailed information on the data.\n\nKilicarslan, S., Celik, M., & Sahin, \u015e. (2021). Hybrid models based on genetic algorithm and deep learning algorithms for nutritional Anemia disease classification. 
Biomedical Signal Processing and Control, 63, 102231.", "VersionNotes": "Data Update 2022/10/17", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2556689, "CreatorUserId": 2694551, "OwnerUserId": 2694551.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4342817.0, "CurrentDatasourceVersionId": 4401254.0, "ForumId": 2585904, "Type": 2, "CreationDate": "10/17/2022 19:09:56", "LastActivityDate": "10/17/2022", "TotalViews": 1536, "TotalDownloads": 278, "TotalVotes": 7, "TotalKernels": 1}]
[{"Id": 2694551, "UserName": "serhathoca", "DisplayName": "Serhat KILI\u00c7ARSLAN", "RegisterDate": "01/11/2019", "PerformanceTier": 0}]
false
0
2,914
0
3,772
2,914
129089934
<jupyter_start><jupyter_text>1000_companies_profit
The dataset includes sample data on the operating costs and profit of 1000 startup companies. Well-formatted dataset for building ML regression pipelines.
**Includes**
R&D Spend float64
Administration float64
Marketing Spend float64
State object
Profit float64
Kaggle dataset identifier: 1000-companies-profit <jupyter_code>import pandas as pd
df = pd.read_csv('1000-companies-profit/1000_Companies.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000 entries, 0 to 999
Data columns (total 5 columns):
 #   Column           Non-Null Count  Dtype
---  ------           --------------  -----
 0   R&D Spend        1000 non-null   float64
 1   Administration   1000 non-null   float64
 2   Marketing Spend  1000 non-null   float64
 3   State            1000 non-null   object
 4   Profit           1000 non-null   float64
dtypes: float64(4), object(1)
memory usage: 39.2+ KB
<jupyter_text>Examples:
{
    "R&D Spend": 165349.2,
    "Administration": 136897.8,
    "Marketing Spend": 471784.1,
    "State": "New York",
    "Profit": 192261.83
}
{
    "R&D Spend": 162597.7,
    "Administration": 151377.59,
    "Marketing Spend": 443898.53,
    "State": "California",
    "Profit": 191792.06
}
{
    "R&D Spend": 153441.51,
    "Administration": 101145.55,
    "Marketing Spend": 407934.54,
    "State": "Florida",
    "Profit": 191050.39
}
{
    "R&D Spend": 144372.41,
    "Administration": 118671.85,
    "Marketing Spend": 383199.62,
    "State": "New York",
    "Profit": 182901.99
}
<jupyter_script># # Predicting Profit using a Multiple Linear Regression Model based on R&D Spend, Administration, and Marketing Spend
# The model I have created uses Linear Regression to predict the profit of a company based on its investment in Research and Development (R&D), Administration, and Marketing Spend. The dataset used to train the model contains information on these three variables and the corresponding profits earned by various companies.
# By analyzing the data, the model has learned to identify the relationships between the input variables and the target variable (profit), and can use this knowledge to make predictions on new data. The model can be used to help businesses make informed decisions about their investments by providing a reliable estimate of the expected profit.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn

# ## Loading Data
df = pd.read_csv("/kaggle/input/1000-companies-profit/1000_Companies.csv")
df.shape
df.sample(10)
df.isnull().sum()
df.corr()

plt.scatter(df["R&D Spend"], df["Profit"])
plt.xlabel("R&D Spend")
plt.ylabel("Profit")

plt.scatter(df["Administration"], df["Profit"])
plt.xlabel("Administration")
plt.ylabel("Profit")

plt.scatter(df["Marketing Spend"], df["Profit"])
plt.xlabel("Marketing Spend")
plt.ylabel("Profit")

# ## Splitting Dataset
from sklearn.model_selection import train_test_split

X, y = df[["R&D Spend", "Administration", "Marketing Spend"]], df["Profit"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, random_state=42
)

# ## Training Dataset using Linear Regression
from sklearn.linear_model import LinearRegression

clf = LinearRegression()
clf.fit(X_train, y_train)

# ## Predicting Dataset
# wrap the single sample in a DataFrame so its feature names match the training data
clf.predict(
    pd.DataFrame([[78013.11, 121597.5500, 264346.0600]], columns=X.columns)
)
clf.predict(X_test)
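# ## Evaluating the Model
# A minimal added sketch (not in the original notebook): score the fit on the held-out
# test split before trusting individual predictions.
from sklearn.metrics import r2_score, mean_absolute_error

y_pred = clf.predict(X_test)
print("R^2 on test set: {:.3f}".format(r2_score(y_test, y_pred)))
print("MAE on test set: {:.2f}".format(mean_absolute_error(y_test, y_pred)))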
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/089/129089934.ipynb
1000-companies-profit
rupakroy
[{"Id": 129089934, "ScriptId": 38370784, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14110262, "CreationDate": "05/10/2023 23:49:55", "VersionNumber": 1.0, "Title": "Linear Regression Multiple Variables", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 61.0, "LinesInsertedFromPrevious": 61.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184835912, "KernelVersionId": 129089934, "SourceDatasetVersionId": 3105372}]
[{"Id": 3105372, "DatasetId": 1896237, "DatasourceVersionId": 3154274, "CreatorUserId": 3072182, "LicenseName": "CC0: Public Domain", "CreationDate": "01/28/2022 10:49:42", "VersionNumber": 1.0, "Title": "1000_companies_profit", "Slug": "1000-companies-profit", "Subtitle": "1000 Companies operating cost sample data list for building regression usecases", "Description": "The dataset includes sample data of 1000 startup companies operating cost and their profit. Well-formatted dataset for building ML regression pipelines.\n**Includes**\nR&D Spend float64\nAdministration float64\nMarketing Spend float64\nState object\nProfit float64", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1896237, "CreatorUserId": 3072182, "OwnerUserId": 3072182.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3105372.0, "CurrentDatasourceVersionId": 3154274.0, "ForumId": 1919554, "Type": 2, "CreationDate": "01/28/2022 10:49:42", "LastActivityDate": "01/28/2022", "TotalViews": 3171, "TotalDownloads": 826, "TotalVotes": 10, "TotalKernels": 10}]
[{"Id": 3072182, "UserName": "rupakroy", "DisplayName": "Rupak Roy/ Bob", "RegisterDate": "04/11/2019", "PerformanceTier": 2}]
[{"1000-companies-profit/1000_Companies.csv": {"column_names": "[\"R&D Spend\", \"Administration\", \"Marketing Spend\", \"State\", \"Profit\"]", "column_data_types": "{\"R&D Spend\": \"float64\", \"Administration\": \"float64\", \"Marketing Spend\": \"float64\", \"State\": \"object\", \"Profit\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000 entries, 0 to 999\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 R&D Spend 1000 non-null float64\n 1 Administration 1000 non-null float64\n 2 Marketing Spend 1000 non-null float64\n 3 State 1000 non-null object \n 4 Profit 1000 non-null float64\ndtypes: float64(4), object(1)\nmemory usage: 39.2+ KB\n", "summary": "{\"R&D Spend\": {\"count\": 1000.0, \"mean\": 81668.9272, \"std\": 46537.56789148918, \"min\": 0.0, \"25%\": 43084.5, \"50%\": 79936.0, \"75%\": 124565.5, \"max\": 165349.2}, \"Administration\": {\"count\": 1000.0, \"mean\": 122963.8976117, \"std\": 12613.927534630991, \"min\": 51283.14, \"25%\": 116640.68485, \"50%\": 122421.61215, \"75%\": 129139.118, \"max\": 321652.14}, \"Marketing Spend\": {\"count\": 1000.0, \"mean\": 226205.05841882998, \"std\": 91578.39354210424, \"min\": 0.0, \"25%\": 150969.5846, \"50%\": 224517.88735, \"75%\": 308189.808525, \"max\": 471784.1}, \"Profit\": {\"count\": 1000.0, \"mean\": 119546.16465561, \"std\": 42888.63384847688, \"min\": 14681.4, \"25%\": 85943.1985425, \"50%\": 117641.4663, \"75%\": 155577.107425, \"max\": 476485.43}}", "examples": "{\"R&D Spend\":{\"0\":165349.2,\"1\":162597.7,\"2\":153441.51,\"3\":144372.41},\"Administration\":{\"0\":136897.8,\"1\":151377.59,\"2\":101145.55,\"3\":118671.85},\"Marketing Spend\":{\"0\":471784.1,\"1\":443898.53,\"2\":407934.54,\"3\":383199.62},\"State\":{\"0\":\"New York\",\"1\":\"California\",\"2\":\"Florida\",\"3\":\"New York\"},\"Profit\":{\"0\":192261.83,\"1\":191792.06,\"2\":191050.39,\"3\":182901.99}}"}}]
true
1
<start_data_description><data_path>1000-companies-profit/1000_Companies.csv: <column_names> ['R&D Spend', 'Administration', 'Marketing Spend', 'State', 'Profit'] <column_types> {'R&D Spend': 'float64', 'Administration': 'float64', 'Marketing Spend': 'float64', 'State': 'object', 'Profit': 'float64'} <dataframe_Summary> {'R&D Spend': {'count': 1000.0, 'mean': 81668.9272, 'std': 46537.56789148918, 'min': 0.0, '25%': 43084.5, '50%': 79936.0, '75%': 124565.5, 'max': 165349.2}, 'Administration': {'count': 1000.0, 'mean': 122963.8976117, 'std': 12613.927534630991, 'min': 51283.14, '25%': 116640.68485, '50%': 122421.61215, '75%': 129139.118, 'max': 321652.14}, 'Marketing Spend': {'count': 1000.0, 'mean': 226205.05841882998, 'std': 91578.39354210424, 'min': 0.0, '25%': 150969.5846, '50%': 224517.88735, '75%': 308189.808525, 'max': 471784.1}, 'Profit': {'count': 1000.0, 'mean': 119546.16465561, 'std': 42888.63384847688, 'min': 14681.4, '25%': 85943.1985425, '50%': 117641.4663, '75%': 155577.107425, 'max': 476485.43}} <dataframe_info> RangeIndex: 1000 entries, 0 to 999 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 R&D Spend 1000 non-null float64 1 Administration 1000 non-null float64 2 Marketing Spend 1000 non-null float64 3 State 1000 non-null object 4 Profit 1000 non-null float64 dtypes: float64(4), object(1) memory usage: 39.2+ KB <some_examples> {'R&D Spend': {'0': 165349.2, '1': 162597.7, '2': 153441.51, '3': 144372.41}, 'Administration': {'0': 136897.8, '1': 151377.59, '2': 101145.55, '3': 118671.85}, 'Marketing Spend': {'0': 471784.1, '1': 443898.53, '2': 407934.54, '3': 383199.62}, 'State': {'0': 'New York', '1': 'California', '2': 'Florida', '3': 'New York'}, 'Profit': {'0': 192261.83, '1': 191792.06, '2': 191050.39, '3': 182901.99}} <end_description>
526
0
1,148
526
129184384
<jupyter_start><jupyter_text>Students Exam Scores: Extended Dataset

This dataset includes scores from three tests taken by students at a (fictional) public school, along with a variety of personal and socio-economic factors that may have interaction effects upon them.

**Remark/warning/disclaimer:**
- These datasets are **fictional** and should be used for **educational purposes only**.
- The original dataset generator creator is Mr. [Royce Kimmons](http://roycekimmons.com/tools/generated_data/exams)
- There are *similar datasets* on kaggle already, but this one is **different** and **arguably better** in two ways:
  1) it has **more data** (**>30k** rows instead of just the 1k the other datasets have), and
  2) it has extended datasets with **more features** (15 instead of 9) and **missing values**, which makes it ideal for data cleaning and data preprocessing.

### Data Dictionary (column description)

1. **Gender**: Gender of the student (male/female)
2. **EthnicGroup**: Ethnic group of the student (group A to E)
3. **ParentEduc**: Parent(s) education background (from some_highschool to master's degree)
4. **LunchType**: School lunch type (standard or free/reduced)
5. **TestPrep**: Test preparation course followed (completed or none)
6. **ParentMaritalStatus**: Parent(s) marital status (married/single/widowed/divorced)
7. **PracticeSport**: How often the student practices sport (never/sometimes/regularly)
8. **IsFirstChild**: Whether the child is the first child in the family or not (yes/no)
9. **NrSiblings**: Number of siblings the student has (0 to 7)
10. **TransportMeans**: Means of transport to school (schoolbus/private)
11. **WklyStudyHours**: Weekly self-study hours (less than 5 hrs; between 5 and 10 hrs; more than 10 hrs)
12. **MathScore**: math test score (0-100)
13. **ReadingScore**: reading test score (0-100)
14. **WritingScore**: writing test score (0-100)

### Analytics questions:

1. What factors (features) affect test scores most?
2. Are there interacting features which affect test scores?
Kaggle dataset identifier: students-exam-scores <jupyter_script>import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.window import Window

spark = SparkSession.builder.appName("Score").getOrCreate()

df = spark.read.csv(
    "/kaggle/input/students-exam-scores/Original_data_with_more_rows.csv", header=True
)
df = df.withColumnRenamed("_c0", "Id")
df = df.withColumn(
    "total_score", col("MathScore") + col("ReadingScore") + col("WritingScore")
)
df = df.withColumn("Max_Marks", lit(300))
df = df.withColumn("Percentage", round(col("total_score") / 300 * 100, 2))
df = df.withColumn("Status", when(col("total_score") > 99, "Passed").otherwise("Fail"))
df.createOrReplaceTempView("students_score")
df.show()

group = spark.sql("select distinct EthnicGroup from students_score")
group.show()

parents_ed = spark.sql("select distinct ParentEduc from students_score")
parents_ed.show()

vari = (
    df.groupBy("EthnicGroup", "Status", "ParentEduc")
    .agg(round(avg("Percentage"), 2).alias("avg_Percentage"))
    .orderBy("EthnicGroup", "Status", "ParentEduc")
)
vari.show()

df_1 = spark.read.csv(
    "/kaggle/input/students-exam-scores/Expanded_data_with_more_features.csv",
    header=True,
)
df_1 = df_1.withColumnRenamed("_c0", "Id")
# df_1.show()

column_names = df_1.columns
print(column_names)

df_1.select(
    "Id",
    "Gender",
    "ParentMaritalStatus",
    "PracticeSport",
    "IsFirstChild",
    "NrSiblings",
    "TransportMeans",
    "WklyStudyHours",
    "MathScore",
    "ReadingScore",
    "WritingScore",
).show()

df_1 = df_1.withColumn(
    "total_score", col("MathScore") + col("ReadingScore") + col("WritingScore")
)
df_1 = df_1.withColumn("Max_Marks", lit(300))
df_1 = df_1.withColumn("Percentage", round(col("total_score") / 300 * 100, 2))
df_1 = df_1.withColumn(
    "Status", when(col("total_score") > 99, "Passed").otherwise("Fail")
)
df_1.createOrReplaceTempView("students_expand")
df_1.show(5)

weekly = spark.sql(
    "select WklyStudyHours, EthnicGroup, Status, round(avg(Percentage), 2) as Percentage from students_expand group by WklyStudyHours, EthnicGroup, Status order by Status, EthnicGroup"
)
weekly.show()
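# Added sketch (an assumption, not in the original notebook): the CSVs are read without
# inferSchema, so the score columns are strings and the arithmetic above relies on
# Spark's implicit casts. Casting explicitly makes the intent clear.
from pyspark.sql.types import IntegerType

for score_col in ["MathScore", "ReadingScore", "WritingScore"]:
    df_1 = df_1.withColumn(score_col, col(score_col).cast(IntegerType()))
df_1.printSchema()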
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/184/129184384.ipynb
students-exam-scores
desalegngeb
[{"Id": 129184384, "ScriptId": 37820261, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8866968, "CreationDate": "05/11/2023 16:03:25", "VersionNumber": 4.0, "Title": "scorecard", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 69.0, "LinesInsertedFromPrevious": 24.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 45.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185006485, "KernelVersionId": 129184384, "SourceDatasetVersionId": 5399169}]
[{"Id": 5399169, "DatasetId": 3128523, "DatasourceVersionId": 5472937, "CreatorUserId": 5430373, "LicenseName": "Other (specified in description)", "CreationDate": "04/14/2023 00:15:38", "VersionNumber": 2.0, "Title": "Students Exam Scores: Extended Dataset", "Slug": "students-exam-scores", "Subtitle": "Exam scores for students at a public school", "Description": "This dataset includes scores from three test scores of students at a (fictional) public school and a variety of personal and socio-economic factors that may have interaction effects upon them. \n\n**Remark/warning/disclaimer:** \n- This datasets are **fictional** and should be used for **educational purposes only**. \n- The original dataset generator creator is Mr. [Royce Kimmons](http://roycekimmons.com/tools/generated_data/exams)\n- There are *similar datasets* on kaggle already but this one is **different** and **arguably better** in two ways. \n -&gt; 1) has **more data** (**&gt;30k** instead of just the 1k the other datasets have),\n -&gt; 2) has extended datasets with **more features** (15 instead of 9) and has **missing values** which makes it ideal for data cleaning and data preprocessing.\n\n### Data Dictionary (column description)\n\n1. **Gender**: Gender of the student (male/female)\n2. **EthnicGroup**: Ethnic group of the student (group A to E)\n3. **ParentEduc**: Parent(s) education background (from some_highschool to master's degree)\n4. **LunchType**: School lunch type (standard or free/reduced)\n5. **TestPrep**: Test preparation course followed (completed or none)\n6. **ParentMaritalStatus**: Parent(s) marital status (married/single/widowed/divorced)\n7. **PracticeSport**: How often the student parctice sport (never/sometimes/regularly))\n8. **IsFirstChild**: If the child is first child in the family or not (yes/no)\n9. **NrSiblings**: Number of siblings the student has (0 to 7)\n10. **TransportMeans**: Means of transport to school (schoolbus/private)\n11. **WklyStudyHours**: Weekly self-study hours(less that 5hrs; between 5 and 10hrs; more than 10hrs)\n12. **MathScore**: math test score(0-100)\n13. **ReadingScore**: reading test score(0-100)\n14. **WritingScore**: writing test score(0-100)\n\n### Analytics questions:\n\n1. What factors (features) affect test scores most?\n2. Are there interacting features which affect test scores?", "VersionNotes": "Data Update 2023-04-14", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3128523, "CreatorUserId": 5430373, "OwnerUserId": 5430373.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5399169.0, "CurrentDatasourceVersionId": 5472937.0, "ForumId": 3192141, "Type": 2, "CreationDate": "04/13/2023 21:52:39", "LastActivityDate": "04/13/2023", "TotalViews": 75452, "TotalDownloads": 15444, "TotalVotes": 282, "TotalKernels": 38}]
[{"Id": 5430373, "UserName": "desalegngeb", "DisplayName": "des.", "RegisterDate": "07/07/2020", "PerformanceTier": 3}]
false
0
729
1
1,321
729
129010847
# # Note: This is a very rudimentary instruction model, trained using the Databricks 15K dataset on the 560M-parameter Bloom model
# # Below results are after training with the Instructions dataset
import torch
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM

MIN_TRANSFORMERS_VERSION = "4.25.1"

# check transformers version
assert (
    transformers.__version__ >= MIN_TRANSFORMERS_VERSION
), f"Please upgrade transformers to version {MIN_TRANSFORMERS_VERSION} or higher."

# init
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = AutoModelForCausalLM.from_pretrained(
    "sai1881/bloom-560m-finetuned-Instruct-DB-v", torch_dtype=torch.float16
)
model = model.to("cuda:0")

# the prompt must be defined before the first generation call
prompt = "Instruction: Why can camels survive for long without water? <END> \ncontext: <END> \n \nresponse: "

# infer
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
input_length = inputs.input_ids.shape[1]
outputs = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
    top_p=0.7,
    top_k=50,
    return_dict_in_generate=True,
)
token = outputs.sequences[0, input_length:]
output_str = tokenizer.decode(token)
print(output_str.split("<By Manoj>")[0])

prompt = "Instruction: Why can camels survive for long without water? <END> \ncontext: <END> \n \nresponse: "
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
input_length = inputs.input_ids.shape[1]
outputs = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
    top_p=0.7,
    top_k=50,
    return_dict_in_generate=True,
)
token = outputs.sequences[0, input_length:]
output_str = tokenizer.decode(token)
print(output_str.split("<By Manoj>")[0])

# # Below results are before training with the Instructions dataset
# init
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m", torch_dtype=torch.float16
)
model = model.to("cuda:0")

# infer
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
input_length = inputs.input_ids.shape[1]
outputs = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
    top_p=0.7,
    top_k=50,
    return_dict_in_generate=True,
)
token = outputs.sequences[0, input_length:]
output_str = tokenizer.decode(token)
print(output_str.split("<By Manoj>")[0])

prompt = "Instruction: Why can camels survive for long without water? <END> \ncontext: <END> \n \nresponse: "
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
input_length = inputs.input_ids.shape[1]
outputs = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
    top_p=0.7,
    top_k=50,
    return_dict_in_generate=True,
)
token = outputs.sequences[0, input_length:]
output_str = tokenizer.decode(token)
print(output_str.split("<By Manoj>")[0])
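# Added sketch (an assumption, not in the original notebook): sampling with temperature
# 0.7 makes the before/after comparison noisy; greedy decoding gives a deterministic
# completion for the same prompt, which is easier to compare across the two checkpoints.
outputs_greedy = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=False,
    return_dict_in_generate=True,
)
print(tokenizer.decode(outputs_greedy.sequences[0, input_length:]))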
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/010/129010847.ipynb
null
null
[{"Id": 129010847, "ScriptId": 38312970, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2445878, "CreationDate": "05/10/2023 09:44:59", "VersionNumber": 1.0, "Title": "Large Language Model Custom Model", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 77.0, "LinesInsertedFromPrevious": 77.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
938
0
938
938
129010375
<jupyter_start><jupyter_text>Aerial Semantic Segmentation Drone Dataset Dataset Resource: https://www.tugraz.at/index.php?id=22387 Citation If you use this dataset in your research, please cite the following URL: http://dronedataset.icg.tugraz.at License The Drone Dataset is made freely available to academic and non-academic entities for non-commercial purposes such as academic research, teaching, scientific publications, or personal experimentation. Permission is granted to use the data given that you agree: That the dataset comes "AS IS", without express or implied warranty. Although every effort has been made to ensure accuracy, we (Graz University of Technology) do not accept any responsibility for errors or omissions. That you include a reference to the Semantic Drone Dataset in any work that makes use of the dataset. For research papers or other media link to the Semantic Drone Dataset webpage. That you do not distribute this dataset or modified versions. It is permissible to distribute derivative works in as far as they are abstract representations of this dataset (such as models trained on it or additional annotations that do not directly include any of our data) and do not allow to recover the dataset or something similar in character. That you may not use the dataset or any derivative work for commercial purposes as, for example, licensing or selling the data, or using the data with a purpose to procure a commercial gain. That all rights not expressly granted to you are reserved by us (Graz University of Technology). Dataset Overview The Semantic Drone Dataset focuses on semantic understanding of urban scenes for increasing the safety of autonomous drone flight and landing procedures. The imagery depicts more than 20 houses from nadir (bird's eye) view acquired at an altitude of 5 to 30 meters above ground. A high resolution camera was used to acquire images at a size of 6000x4000px (24Mpx). The training set contains 400 publicly available images and the test set is made up of 200 private images. PERSON DETECTION For the task of person detection the dataset contains bounding box annotations of the training and test set. SEMANTIC SEGMENTATION We prepared pixel-accurate annotation for the same training and test set. The complexity of the dataset is limited to 20 classes as listed in the following table. 
Table 1: Semantic classes of the Drone Dataset tree, gras, other vegetation, dirt, gravel, rocks, water, paved area, pool, person, dog, car, bicycle, roof, wall, fence, fence-pole, window, door, obstacle Kaggle dataset identifier: semantic-drone-dataset <jupyter_script># # Semantic segmentation using pytorch and deeplabv3
# [image.png: embedded base64 figure omitted; the source dump is truncated partway through the image data, so the remainder of this notebook's script is missing]
uroSXmKCgCAgCAgCgoAgYP8IODk6UFyEL8WE+nSZjIuTQ5dtskEQEAQGPwJCgAz+cywzFAQUAtD+mD0+kmJDvai57WoGiJOjqoHpJSLo8ikRBAQBO0JAMkDs6GSJq0MWgfKKSsrat4vGjJlCbj7XRy70NvOjpqaGMg5k0JmzZ6m5ByKmra2VoqOjafbsOaqUloODUKxD9kMrExcEBAFBQBAYFAh0dFyheq6AUVzZQK28IMKZCREYiJHQAA/ylEWgg+I8yyQEgd4gIARIb9CStoKAHSOATI+xcUGUOiLQaBYQQB8mD/t2fGbFdUFg6CEg4cmhd85lxvaHQH1LI5U115JDOzJBro8A6U3mB8iPD9d8SNvW/YeCPFrJ182Vml2CybWltMvfE+cq6dOjJ2lEXCJ5uHvSsltvJQdRGbK/D5t4LAgIAoKAICAIGCDQ0tZBx3Iq6OO92VRZq8tIdeNKGAFe7nTXTUmUMtw4JiLgCQKCwOBHQAiQwX+OZYaCgEKguaWdSngFRFGFcTmIIF8PigjyIjcXR0FKEBAEBAG7QEAyQOziNImTQxyBhNg4Gj1zMbn4X7vKmLnMj+6yNAzJj6WTo+mmqSnk5Rds9kzszcqn6qZtNLIkjJy9Xcy2kY2CgCAgCAgCgoAgYJ8IVNTU05HsEqptaNVPIIF1QeobW+xzQuK1ICAIXBcCQoBcF3xysCBgPwggBXRLxgXKOH2ZGpkMgbkz6TEmLoTuW5gsBIj9nErxVBAY8ghIBsiQ/wgIAEMEAdPMj+4Ez03Jj9uWzCVPd/OZJ4dP5dCGDduoqraRlkxPoJyKToHUIQKtTFMQEAQEAUFAEBjUCAT4eNKExBBqbG7TzzMqyIfvDWTRw6A+8TI5QcACAkKAWABGNgsCgw2Bto4OqqhupjP5lUZTiwjwJuwTEwQEAUHAXhCQDBB7OVPipyBw7Qgcy9xHJ1hDZEpaOj3wwANKn8OSWUt+1DfyfdD5Anp9zVZFfiyalkKNDY1MgJRZ6lq2CwKCgCAgCAgCgoCdIQCh87HxAaz3MUZpgGjmzmWwoAEiJggIAkMPASFAht45lxkPUQTcnB0pPTmE3N2Mv/aJkb6EfWKCgCAgCNgLApIBYi9nSvwUBK4NAVPyIzY21mJH1pIf6MCU/EiOCaIjXApLTBAQBAQBQUAQEAQGDwIOrHHq7eFCrkx4mBrIETFBQBAYegh0vRoMPQxkxoLAkEAANwBTUsNpHKeBGpqbqyOXwpJLwZD4EMgkBYFBgoBkgAySEynTEATMIGBKfkRHx5hppdsE8uO9996mHRvXEzQ/LJW9Mpf5AfJDTBAQBAQBQUAQEAQGHwJtnPWRe6may38XGZXACvFzp/RRYUoDVUwQEASGFgIS9Rxa51tmO8QRcHRwICdH43JX2CYmCAgCgoA9ISAZIPZ0tsRXQcB6BEzJD2syP4T8sB5faSkICAKCgCAgCAwFBNrar1BOQRWt232Oquqa9VMeHuZD4UGeQoAMhQ+BzFEQMEFACBD5SAgCQwSBxpY2OnDiMp24UKGfsTtnf0Tx6ofpYyLI18u8UOgQgUemKQgIAnaEgGSA2NHJElcFAWsQqK+lo1kHKe/LQ0rz4+GHH7ao+YHvf+01ZH7ADWh+SOaHNSdE2ggCgoAgIAgIAvaLQDtrnLpymW+UwHJ1aVcTcXdxJDd+7+IsYVD7PbPiuSBw7QjIN//asZMjBQG7QqCusZUOZpXQ9sMXqLlVdxOAm4LpoyMpLTmUfO1qNuKsICAIDGUEJANkKJ99mbu9IdBU00xuPrpFFob/Yx4O7U3U2NpEudmlRPxavHiqEjyPjIy0OE2QHx+u+VDKXllESHYIAoKAICAICAJDGwGU+E4eHkCP3zqeWltb9WB4eTpTdLD30AZHZi8IDFEEhAAZoidepj30EHDiUlcBvq4UGuBpNPlAroOJfWKCgCAgCNgLApIBYi9nSvwcqgg4OTlRS3MT7d7xCRXm55K7s5tZKEB+ZO3fScVFuXT33Su6zfxAB9eq+bFiXhrFhlgOeBQVlFBBYaFZH2WjICAICAKCgCAgCNgXAhBBD+O4R4i/h5HjDsNkGZV9nUnxVhDoOwSEAOk7LKUnQcCmEfB0d6YFE2OMSj+4uhD5ebkR9okJAoKAIGAvCMiji72cKfFzqCLg7e1NM2fNokNfnqSO8nyqtwBEfaMDl7oKp0cfm0CrV9+tyl4haGHOrpX8QNmr7sgPbaxmJmzEBAFBQBAQBAQBQcD+EejouEK1DS1UUFZnNBlnRwcKZVJEyn/b/zmWGQgCvUVACJDeIibtBQE7RcCNa16OiPAlCH+ZmqVgg2k7eS8ICAKCgC0gIBkgtnAWxAdBwDICXl7edOddd9Ptt3eWnbDcWrfH1dWyFtm1kh89ZX5oPoVFhVB8XHxPLsp+QUAQEAQEAUFAELADBFraOuhYTgV9vDebKmt1Cxyg/xHg5U533ZQkBIgdnENxURDoawSEAOlrRKU/QcBGEWhqaadLvAKirLrByENkgMSE+rAgmKONei5uCQKCgCBgjID59eGCkiAgCNgKAlhY4UDDyMnRMqlhra/XQn6gb2vJD2v9kHaCgCAgCAgCgoAgYD8IVNTU06mLFVRW1aicdnF2UHGP+sYW+5mEeCoICAJ9hoAQIH0GpXQkCNg2As0tbfTpl3mUcfoyNTIZAnNn0mNMXAjdtzBZCBDbPn3inSAgCBggIBkg8nEQBIYGAtdCflTVNgr5MTQ+HjJLQUAQEAQEAUHALAJOjsMoNtyXFk0aTuVMgHi46Up+QxM1yNdYF8RsB7JREBAEBh0CQoAMulMqExIEzCPQ1NpOFdXNdCa/0qhBRIA3tXV0mD9ItgoCgoAgYIMI2HIGCGoOd1y5QhBZtFReEG1aW2X1mQ1+tHp0ydHJmbMaHHpsJw2uH4HekB97s/Jpw4ZtalDJ/Lh+7KUHQUAQEAQEAUHAnhFw5Hu1xCg/Cg/0pPZ2XawD22C+ntefnWrP2IjvgsBQRUAIECvOfHl5OZ08eUrfMiDAnxITE6m7WsVWdCtNBIF+RcDN2ZHSk0PI3c34ax8b6kXYJyYICAKCgL0gYMsZIJWVFVReVkphLObs49OpuQTSo66uliorK6m4uJjKSsuooUGXkm8vuA91P52dnSg8IpxCQ0PJ39+foHNhieQa6lhd7/x7Q36cOV+gyA/J/Lhe1OV4QUAQEAQEAUFgcCCAxVKuHONwdHCgdoPFnnjv4iQLWcyd5cLCQsrJyTW3y2gb4qEebm4UFhFBHh72k03T3NxMp06dptraWjUfNzdXGjdunD6ua7rf29ubRo8edcPivtqzoenzRH/70eMJH0QNhACx4mQe2H+AVq26k9raWqitvY2WLFpIb739zg37IljhkjQRBHqNgLeHC01JDaeJyaH8OdaFD5Ea6sQ3AO4sCCYmCAgCgoC9IGDLGSBNTU1UVV1Dvr5+egIEwVw8UOzdu5dOnTxO9Q3evC+A7yNc7AVy8ZMRaG5uoZqaPeTpUUujU8ZQevokSk5ONiK6BKjrRwALj9599z3au20
DLZ0cTbctmUue7l1Xa9Y3NpNkflw/3tKDICAICAKCgCAw2BBo46yP3EvVXP67iBqb2/TTC/Fzp/RRYRQR5DXYpnzd8/noo3X0zHe/p+KelszJyYWCgkIoMCiI74Hj6Q6Oky5ddrNdECFVvAjt4Ycfo8zMQ2p6iQmjaNv2TRQbG6vel5WV0Teefor27t6r3o8bN4nWrftAv19t7CMrLiqif7/9Nj8LutGjjz5iFFvuTz/6aDp2041EPXtxqkB+iAkC9owAVjy0UQeX77Dl8KE9Iyy+CwKCQH8gYMsZIEH8QIAVQ1qWKG5wP/v8M9q9axc5OsXS6NSVnEEQS56e3v0BlYzRxwjU19dyBs9FunD+BB08+AHNnTOa5s2fR9HRMZIN0gdYgyz88MP3eyQ/MJRGfkjmRx8AL10IAoKAICAICAKDCAEs+MwpqKJ1u89RVV2zfmbDw3woPMhTCBAL51pb9G1ht1oQXlB4Qb1AJGzf/jl9+9vfpO985zs2TYLYyrMjsjs+3f4p/fmFP9KOL/bQk9/4tiWoZfsNQEAIkBsAqnQpCNgiAo0sgn7gxGXKuVzNqyCuiqC7OlIUr36YPSGKPK8Kg9mi7+KTICAICAKGCNgyhQviQyM/Ll68SGvWrKGsM5W8imglJSVNIF/O/HByMi476ODQQR0dDhxAl7+2jIP2GYyJTlDn8uzZI7R3/y5+CCygu+68gxISRwoJch2XKsn8uA7w5FBBQBAQBAQBQUAQMEIAJbBcudKFn0Gyhxu/d+GSpmJ9g0B5eQn95te/pajIKHrwoYds9j7YmmfHtrYbv+B9185d9PjjTykCCebpLlq8ffNJtK4X+eb3gJOtMIU9uCm7BYEeEahrbKWDWSW08UCOvi1uCqaPjqQ0LoslBEiPEEoDQUAQsBEE7OG3GXV0NfJj5ozlHDAfSy4ursT66OplaO3tulrE8te2cdDOGQiswIBgmpg2R2Xz7PxiLf3n/Q/o3rvvpuFx8Tb78GcjX1+zboD8sCbzw7DslWR+mIVSNgoCgoAgIAgIAkMeAeh8JA8PoMdvHU+tra16PLw8nSk6WLKwrfmATJ85nVatXKXKNGmGTOjDh4/Qpk82U01ttdqMv++9+zYtZKmAyMhIa7q2yTbQ9/uvp7/D9/P3cdnbJgoPD1Oaf31ppWUlVFRUoLp0cjQfjjf0A+0CAvz63I++nJM99WUecXuawQ321RqmEOI1hqYJYiKN/+jRTMrNyVEf2DGpKV0ejFEa4/jxE4Qvgq+PH6VwG2vKKOC4M2ezqaS4RImqurq5UDDX4ktMSqTIqGj+MnUv7ITUq+zsbMo6fYZa25r5QhVN48eP09extjQnU7gN+4EfISEhlDwyiSKjo0UjxRSsAX7vxOWvAnxdKSa0U5QXLgVyHUzsExMEBAFBwF4QsOa3eSDn0tDQQNu2blOZHyA/kpPTyNHRsQvxMZA+ytjXjoBGYDk7828qZ4PMnrNSkSAfrV9PD/Hqt8DAwGvvfAgeiftlTfNj1ewEWjx3mmh+DMHPgUxZEBAEBAFBQBDoKwQQkwsL8KQQf2ORbodhw2gY7xPrGYGExCQub/Vf+oU9iHoCOdy3vfDCCyrzQyNBDn6ZSadZYFwjQCzFE3PPnaPDRzLJmbNwJrGWXnh4RJeFQ1qMtKCggCAQHhMbTfHx8VbfX2MRGnwxjLFqOh/dzdrHx4fuuHNVd02M9uF5Lzc3l3LO5VAl64vA1/iEePY1rlt9QOiomMoraHjhcwth+d74gWPz8/Mom+PDmDMM8d2UlNE9YmbuPGn9nTxxkkpKSigqKopGsRi8PZNb2okTAqSHj3dPq0xBALz44ou0bctm1VMcfzF/9rNnFanxu9/+mnAhQFqYm6sHjUweTQ8+eA+L3DzK5S+c6D/v/Yf+8c9/0InMk+rC4ePtSyPiEumxxx6mBx643+yXBivkXn31FVr38Xo6c/ocVVdV6L88gYEh/OGMoQU3LaKnv/k1i2I9Z89k0Z/+9Cfatu0LOn8+Wx2PY6fNnkrfeuKbFBMTRf/7q19RCV84YI88+jjdvup2owsTvhSHDh2kF//2Eu3bdUDfD+YQGhpBCxfOoUcfe5xLfozrckHrAXLZfYMQ8HR3pgUTYyg5JshohNAANw40ON+gUaVbQUAQEAT6HoGefpv7fkTre8Tv44kTJ+j4yWxKSpykMj9AfnRnVzii3tHRmQLtwKT0MH5Au5FmOOaNGM90TpjLjRjnRmJkTd/IBgEJMnbsDMo4uJUO7D/A92ELZBGINeBxG8OyV70hP+ZMGkWxIbKC00qYpZkgIAgIAoKAIDAkEQDhIXbtCLS2tujvaTUkQRQ88MADtGnLJr1geH1dnT74jtE++/RT+tNLf6a2+kby9Q9QGiE11TX07M+fpYx9GRwPdWEdvRn0y//7NU2YMF45qN0Tvv3uv/UxUmRJhIVFUWpKIq2++14Vl8T45gzEzJtvvkWGxxvGYRctWmTuMP02jP+LX/yCsk6dVNu02G5oWJjRcYgBo5TVKxyXPfzlMSYfLlJTc4OK+UIgHr4ihmooDn/kyFH61a/+j7KyclRbGOKwr732Lh3M+JKcPN3pe99+hmbPnqVwsMYP9IFnztf+9Q7rimylgoI8FXuGIb47clQCrbh1Od133/1myQvNp+rKCv34IE0Qb3777Q/ofG62UZxai2Vbwl8NbOMmBEgPJ8iay2XOuWzazCs9YYkJBcz6vU1/e+kVyj53Wt87PuQQCXruf7IJTCHMkDHFe5AgaPPjH2Wr/U888YQReYAv9M9++t/08suvdGEM0R4fdrzQR2FBDv3pj38k0y8rPuTfePop/YVKDXT12A1rmVQ5ns21++6lz7bu0Nelm7dggdZM/UVwZ/OmTcwGP2M0R+zDHPDC3PfsPUS//c3zdNPCm4yOlzcDg4CbiyONiPAlCH8ZGlZAWPM5HxivZVRBQBAQBLoiYMvXLGRD7t69mxyGBdKkSXN5hZNr1wlc3QKSAGKDlVXl6ne7uaVR7fH28qfwiOFKL6Qn8sRi5xZ2tLY2U21dDV2+dIH/VurHCw6JJH+/wG79tdCl0eb29nYuNdBEZeWlVFqSr5+Tq4s7+fmHUDg/xDg7u/X5vKzx7Ua0QTYIztGoUWlUWlpIn36+h8ZxRu1gWCV1I/Ay7LO3mR9vvbOe/LzdSciPG31mpH9BQBAQBAQBQcC+EWhr76C84lo6nlNKzW06/VPMKIjvI1LigyjUJDPEvmfb/957eXmRj4en0cDNTS3698je2P7JdhXsRzA+LW2CCqojVglD8D8nt0BlgsCQ9fHznz9H//znG3qCQGunia5DNBwLzH78o+90yWzAPeVzP/8V/fUvfzQ6XovD/uTHp+jz3V9QeVmZ3kfTf+qYxMk4lKGPlY4bV8l9NRs1w+fqjTdeox/96Fk92aA1wFiar3uZ5Hn2f35GTz31lCKQMK5h2TDtGK09yJOH7nlQbW5qaurRD8RkQTI9870f6TE1dBRx4b27SxTZtGv/Xnr+5/9LqampRnMx9AlE05IFi1UpM9N4M+K7J0
8cVbHstrZW+ta3vm23C72EADH6CHR909tVpmD/nvufn5Mz1/lewjXwwHaC5cMHBl9yfHj+9xfPq4FcXJwttgFrecsty4yyODZu3EBvvfWOnvxITBhFI8ckkicHFeo5aHI045ietMCXa8WKz+iee+7VTwpMIljHvbv36rdFRQ6n8ZPHqj40P+EfAjIwc3XpTjEj+uOfPKsnP5D1MX3aZDVXsIf4smtkDhheZJQkjUzWjyn/DAwCuFhfKqunooo6IweCfD0ogoXQQZCICQKCgCBgDwj09re5P+eUk5NLhZfKKS5uDnl7W64bC6IgL5/TwL/8lI4dz+Ab/zwmDmqVq87OSPdOokkT53J6+AImJoKuOyME45VXFNOBA1s5g3NHl/FCw2Jo7JjJNGXqUiYpontNUKD/6poKJQx+MGM75eSc4hv4BqM5ubl5cFr4aL4Bn8YkwcwbQvD057k2HMvNzZOGjxjFxM9xlXZvLqV/oHyzxXGvJfNDyA9bPJPikyAgCAgCgoAgYHsItLVfoTMXK+itbaepqq5ZiaHDwgM86DHPMUKAXOcpq+aST5eLdQuptK5Qlt/wf8Q7m5g/QHbIn1/4m177QmszbdYUfl6KY70NVNX5k1Hgfdy4SVxCOF7FObFIGwusQTCA4IgbEUmPf/3r+rL/IAM+XPMhvfLyi3ryA4RCWvp4iuZSUFqsFAu+uzNU6enJtm7eTD9/7ldGmRbpE8d1iYUiHvqH3/+Z5QGSORNkqeoWMWJLBqx6Y6jGY0h+IG6bkjpeYQZDZgowQwxam/df/vCCUXzZhYkZQ5/+8Y/XVNbHiBGJlDZxbJd+MKdX/vE6LV26rAuZ0hvfB7Jtz2d4IL2zgbF7u8oUX0pPrxDOfPgV3bX6LlW/LSMjg+6/7yt6wgBtQBr88vlf6ktdITPj4Ycf07N3KIuFGm5arTpcFE6fOs7pXrpgCspM/fWvL9B8zs5AnTjsf+WVV5kN/bE+CwPtcTHQNEk+/2yHYh01w0UFGRqzOM0KrOTFixfp10yQmDJ+hqcB4yDFSmNuMQ8wmyjrhVQoPNC+9NJLiuTBPME4Qhj0xz/+iZTCGuDPc3VdC23JuEBfZBboPXFn0mNMXAjdtzBZCJABPj8yvCAgCFiPQG9/m63v+fpb5vFvaW2tA5ekTGLSoqvgOUbQyI8P17zEKdTruJZupfp9d3d3Vw4UF2fTieMH6HjmTqqouESLFj9wXSQIMk0uF+XTxo2v0o7P1nD/l9QNb4C/TquiojJPjZd55AsO4F+mZcseociIWKtJF20+W7e+RRkHNqubZ5jhnBoby9Q8j2XuZn20z+ls9hJatOh+VT6qr7Nc1OD9bDjXQUER/IAdyRptZ/X3Vv3shl0M11vyQzI/7OK0ipOCgCAgCAgCgoDNIdDY0k7NrbqXq7Ms+OztCXJ0Mg7MI754+fIlLoX/Gp3JOqXvDqWf4uMTzHavZUYgSI9F3B6eusyRW2++RR8vRSkoTRcDC8l/87vf0+jRKdS6DdakAAAgAElEQVTBzzB79+yhp1mcHDFI9IVA/c2LF1Fcgm48+IPMBQToYSA/vv7E1+iHP/iBqoiDGObmTZvpe5wtYVilx6yz3WxElspLr/5dv+gcmS3PP/8crV59r4qFatooWiwU2R1vvPkGzZw1kyZwBsy77/yby4Z9Ti/88df6ua5evZruufse8mRMxowxztCw5ArGgRSBFpPFfL/61Qfp+z/4oYof4xxlZmbSz/7fs3ryY/P6T+jNtHTG4HtmszeAPfq7ZeVylS0C7GHIMnnyyaf1uOEZEponptkklny1te1CgPRwRq5llemy5Uv15Ae6hw4G2E3ty4Yv/uyb5hjpfIxmUZkZ0ycZpS9pAjbow9nZRTF899//EJ07l8vZI1wzb/58PakAAiOdRYQQbNC++OXlVbzyUlezD2W3Pt60weiiAK0Rw/JU+LJ861vfoqPHj3UpkaXBVJifr+rLaYbMDwh+anXgIPz5GJMhWj1AfJHwP7aZluPqAXrZ3ccItHF9+YrqZk4FrdH3jJuAiABvwj4xQUAQEATsBYFr+W3uj7nhhhMieLCgwGCLoufIlIBoNsiPRs6SuGnR3TRt6mIKCYmilpYmFtQ7zjpdHyhSYtu2dzizIJUmps3h3/7OlUOaELe5eZmWO0YmxoH9n9CmjW+p8WbNuY3vOZZwmaZ4NV5BYQ7/tn+gyAkQJBER8RQaco9V5bBAfoBc2fDxq7Rhw+vU2tLMWZ9pNG3aItY+S1PlvGAot3Um6zDt27eVzp45rFLBW/iBZOXtT5olW0znYDhPS3PXjtH2m+vD9FjDNqb7DMfsqR2ORckyF34IKS46Y+60yDZGoDfkx8EvD9Nb6/dJ2Sv55AgCgoAgIAgIAoJArxBwchxGKXFB9J070hQBopmXhxvFhvn2qq+h2jjz8An6wQ9+ynqxnbGi2jpdeabDB48alZqaMXOKEiq3ZIiBfu1rj6ogfXBwMFecaVOBeFQp2bp1qz47BAusoZ+hBdhZFVHpYiB2+cx3ObucCRAQLwcOZugJEGReQ3tZM2R+PPPMM/r4I8a5dflyLrmVQz/58U+N/Lbkr7nt0Hnet3O/fhfivhr5gY2aNsqBwweV9klIZCRNnpiu2iNOithrVVU1/fWqCDowSUocQStuW2FuOIvbjh7NVJIFmkFL5cc/+W99+V0sgIeuys9/+hzl515SMWbEZT/4YB3de/fdetxMB0B1IOiQGJIbiDlD31mLZbe0tPJzo04r2vR4e3gvBEgPZ6m3q0zBvs2cNkUxmZqBNQ0KiDAaadbU6eTl1SneiDaBgX76NvhgGdbQw4cYXyi8tHJSuFiAhbyYl0enTp6iNR+vpbIynegNOqrj2nEITMBqmSU8l31W3390dCzNmTPbyCe8iUtIosmTJlskQLLOnFXiOprNmDW7S/29wOAQoz7yzl9SPgoB0gXuft3gxmRHenIIubs5cQCsTY2N/2NDufyVrIbo13MhgwkCgsD1IdDb3+brG836o7HooLWtiVceeXJWg+VbLOhvHDmyW5ERc+aupDvueILi41JZEFC3Mi0hIVUF0osun1e/uSAOUkbrbqBBZsBQTgoCgqZi6dD4UKWn+MHCw92DHy48lB4HymwhA2PylMV0151PKXF2jVCBfoWXpy9nrhSr7I2zZ47SjBnLOEMkuMfJNzTU0O5dHzKB8r4iP0Cu3LbiEcIcUAJMIw5ADmEOIEU+WvsqZ4ps4RVdG2gkkyUgi9zdPfWEETJWMAcs6GhpYUx5TtBSweIPdzd3vsfy6ZI1os0bDmN/R0eb0jppaKg3Ot5Q58TwGCw0AVameKI/TdekobGBnDk93tz4aIfzBxwvFzmo1WZ44BLrRKA35MferHwhP+TDIwgIAoKAICAICALXhICTI2djB3txqW9jnQp0JsLo1kGKwLmWZdDdEcjqePSRR7vEBQ2PQVmlRx973KgEE/Yjm0ERBhygh42I4/JLE8Z1GQ6LvZFlgqwKkCCHvjxJd97VocpgIfO6uqpCHQNS4ebFN3PsMdyoD8RT5
82bz338WZ/B0WWQHjYc44Xi2jha3NdUEDwyKppe+evfycnZmeO9XupZoKeFe719ZkD5q6IiXVUX+LFw8RJVetfUsMh+3ryZ+nOIZ7zDRzItEiAxIyJoZFKiUTfANzwiUr8N2Fey7IFhpSHTcW35veWnc1v2uh996+nDauoKarf5+xkHDEwvsAhYhASHGpWEQhsvbz/1hdW+/KZ94z2+HKeY4fz8889Uaa38wnw6c/pcFwEe7VitrER1dRXV1+p0PbAvOMyfgoOCugyBD3hCgvGH3rBR4aU8VcNPM7CIuGCZGur0aYYAT1mpZbEh02Pl/Y1BwNvDhaakhtPE5FCjAZycHMj9ak3MGzOy9CoICAKCQN8i0Nvf5r4dvfveWlq6z6hDcN+ZA/nQwggJjVKZHyOGJ6uAvpaBAOIgbkQKhYWPUNkSNZwx0tjUyERGCWdybGStjUrOLp1F48fNYCKk88EOgfrzF7Jo395N/HBQT5PTFylxblhERLQiJyDMjrFBKGjjoY/EpAm8IitOESA1tVWKOLBGe+TcuRO0f/9nagHGpPR5ivxITZ2szx7pzMYYpggRZLJo1tRUzyLpTUaAgpTAww20UQ4e/IKFxXP1++HfqFETaNr0m1XpLG3uhvN2dXXhurULuHRYMe3Z/QllZx/T65DEJ0xUeAMDLy8/JT6/a+dHquxXUqIua8UQT21gkDzQTTlxYh9n44y2mhwymtgQf9Mb8kMyP4b4h0WmLwgIAoKAICAIXCcCCNA2trRRNet/QA9EM2SGBPi4S/nv68RXOxxl9f/7Jz9U1Wm6M2gXx8fHdWmCzPkKg1ghnkOeeupJcvLUlQXWDkBGheFi7zIuEdzS3EQOvDCqmDPRtRiqBwuzYxzENU0tIMCfAjkGiueM3hrisNB41MZB3Dck2JhkQZ8Y13Tht7Zwry+eX+FHzjkWI79KGHkyyQKdEU32wHBeIF8MY7tYWJZ15rRF8gJ6Ke4GC/m1vvxZ63mwmBAgPZzJvlhlirp1hoYvi2H2Rw8u6Hcj2wPiQKiPB8bPkCgBcQJiBYycZl5ubqTV7KtvaKKG+nr9Ph++MICVNGcuLg5miRhdaY9qvUA6jtWxwuZ66dwGwqS6pqr7RrK3XxBwdHCgNuo+ONcvjsgggoAgIAhcBwJ98dt8HcNf16HIMADhcd/931P9IEsDv9+mBpIEBq0OiNQ5caYoSIvCwjxVOut87llVXiqZMyq0xQ4oRbVl879VNsZwJlBAgCATJTQknO695ztGWSGm4+G9Kz9EaIZsC3PZEIbHgcBAua4L50+y/pkXTZ06X2V+ILPEUjkpzAGZIAEBoTwfN87+CFF/0R5Extmzx2jdes4Q2b9FaaL4+oWpbBdkhFy8cJyyTmfQ6dNHOOX8G3qiBdkehQW5XN/3DVVXuLS0SC/CjmPxKi7KpS92rOWHhi/VFGbOXKqyS87weNg+ddrNitwYMXyk4RTZL+in5NGWLe9y1s4OuuWWhxQBImY9Ar0lP16TslfWgystBQFBQBAQBAQBQaALAojBnTxfRhv3XKCKuka1393ViQK9PeiWmSMoZbhOB6/LgbJBj4AWYzSEBPf7UVExFB7qT6gGc9tttym9CHMBeMPjYsJYJ89MZnQzL/AyXKiNIP3mrdt6PAslXIYJZbRwnlGWy9A8OKvcnLmZGd9cO0vbmhp0WSbafuh29Mb64vkVBEh5hbH4fFBw14Xteh9NCI0WXiBnyVw93Pl5sytFgPjwYLGusxssM+ujefQFS2eaAQLXnJ17Bz0+6L/+7Z9ZMOe3euID9fGQIjYuLVXVlvPkD/dPuc4bGE1crG6EIQCDQA3IF4yBVDawud2Zp4s716OL7q6J7OsHBLAC4sCJy5RzuZoam3Wl0dxdHSkqyItmT4giTzfzhFg/uCZDCAKCgCDQKwT64re5VwP2cWOQAIEBumzRzgyJzkEaG+vpzJkjnGZdpITKockBosTby4dmcOAeWQ0gAnbvWc9pycNVqSpkTpw6uV+VlQJxMGvWUkVGYCyYVu7K3HgtXLoq++wRKsg/qbTE4kYkE+4xejKUhLrE5bywImtk8mgmEFJVeShL5IfWHzJB0M6BSXlDkqWcszY+++x9RUjg4WrhwntoytRl5OsboDJSQEB8tPZF9dfH15/8/EPY107CorGxUQm8w8aPn8ckx60Uy2RTW1srHT2yk95773cqw2UfkyspqVNU6a24EUk8HtHZrAwmck51EWVva2vh7acV3u5MpIAkwXkQsw4BIT+sw0laCQKCgCAgCAgCgkDfIQDdj8tl9bT3VCHVNrTqO06I8qN5EztL+vTdiIOvpyWscQEdCcPYpQvHMbFgCuWdkBlgjvgw95zm4tq5OLs7pPD8AZ1h0wwQw2OQDQLyRQvWOzh2yg+gXWurrpxWd+P0xT4spOqNmcOlN8ejLeYMosLQmpqae9uN1e37wmerB+uHhjcmSt4PjvfXEH3B0plmgFyL7yh79fG6tXryA3X2fvzj79PNNy+h4BBdOa0TJ06ooAfMtIyWr7eXWhWp2eXiSqrjzAyI8ZhaZSVneVxNqTLch4tbgEH6E9o8+thD9P3v/8C0C3lvgwjUNbbSwawS2nggx8i7eeNiKI3LYgkBYoMnTVwSBAQBswj0xW+z2Y77caMhSVBbW6nKWzXU1yhR8jNnDtKOHRsIAf1Zs1dwpsM0vT4FsicWLryDPnj/L6yjsVlfuikv/xyTB2uporKcFi0CcbBUlXnSzHA8ZG5AF6ShoZrLWtboRdAhTD5uwhwuEbVAZWX0ZNXVFcpvkAQoT4WsDi0bpadjTduBwMnNPUFHj36uDp08ZQktWvyAInegIwLCyNdnBftbwZmwv6LjmTtp0sS5irAwNGiowZfFS+4z0lWZlL6Ajh/frwiQvItnWYSwlPsMZQ2SdF7MkULnOYslL+8MY1KjSnXBMC50RM5wGTKU8xw1Kl2VJtNIJaOB5U0XBHpDfuzef5je3iSC511AlA2CgCAgCAgCgoAg0GsEUPkinPU/po+OpMbWTgIEGSB+Xj3f4/Z6wEF4QIBfAI0ZO9ZsOanupmvuOc2bMTe3MNyTdZE9vTsz4bHA+28v/6OLVoil8VClJjTER1/BBs8BxSXFZpujMo1hVRyzjSxs1DSbNckCjFNS0qm/rB0Gf+rqapmEaVWxWUNtaHO4WBjO4mYQSYb60tBfLCnu6ofWQQXrdWgGvZDQsN4tTO8Lny1OZgB2CAHSA+h9wXiZ+6L3MGyX3VlZp/SrGrFz2S2L6cGHHjJiXCs4FQrBEphpBoivv79KU8u82jOCHBcv5nW5sDQ0NNDhw0e6jK9tiIriTAFme7VSWxcvXDAr9AmBdszbHCNssXPZcUMRcOKbgABfV4oJNV65GujHqW68T0wQEAQEAXtBoC9+m21lriixlJd3jjZz6ap8JjEgRI7faNSovXnZ/Sxed4cK8muZEiA1QG5opZt2fLGO67V6cknKXSpLAUH6+fNXUTjf4JorYaUr6VRAW7e+RadPHdGPp7JGmGzBsRBINyUoTPECOdDa2sRZ
knIrLLVinlJdvkD8c/V+yHfMhiuSKUxNJCkyPzoEiTL5lUoYzZp+RXgHvIejjo3A3j8HIlcTU41qtAseiVmZyba4tlIBYU7xXt2LLFEXiLZLKGU0VDGRku1goFpn5tX25Gh6WuHwVRtCtBzUOEYAilwPx4Xe4iQ2NQuvoGpbezU51brsYGdX7SBNoCtkkwnpbCMpcyQSerI9jfqxgsTp9HCm3woAk5lVTXeAazc12axwMZTkNRDZzZ/dxL0gOGytHT/6SSQPCj2rlJ6PXxbGhY3oPR+efnTwPkuNPgnMerOlAnT7fskb17HpGCJNg3cSAuFu280/O6Hh4JfOhhz7Kf1sO4NuIY5gMa7nY/WZxfSTsr3Y7ju5dtl7t/K9mn3LZXY4zz7YsBfsyXldVdtp7LzPnmvGnmxwDkrv71V7+Wi5/3ARDdIbv3HFZyc/d67q9uto3emAFdLrq6qg7A1qMSArP04oVjchxyPt093fK973xH6hoaFy0YGplcPAMG+LF4flbj3ZWAH5HWG8sGP9xekwF+rMaBvMc+YtAzDmHGXFFRkQLz9WDBLgVZBrIa+T7lgumZaMT6zEAB/FWLcQzdHkiUQ7qV1ydfltAlR8vKymadw9z/9OQUfD4TkAQeMs7hdX5C8Bx2YwIsJxY5nK67OofzzSYph//H83vr4Bc7M6E6AC9cH9Rp1jpWCuqsZNxz5a8M8GMlWVz+NsyvPQ81wPU8Y2/5u7Oh1lwSADFNgbGBArEeLHSzWJ6YaocprjZ7l+yQXcUa6EGQIwqmSHSIP7CDkoDMkS1aJCWV9eIAm2MAc/aLrJoWGn+IGZkeSGNlTTbqUbxW1BJECBdVpfAIWTBILkAxO+OGbJQmJQ+VtZjstLeIKU8rqps+uo6Z/5r808RZIiYij337q3LgwIsABODPAc8H3deD75GRkUxNAWIJylRMk1AaSdsAGqCwbh+WP0SPy4WLZ1Q733jkjyUwaZcRM/LD7vJLJPrFGNgJ2yWvQOuL68VQmNckn9qE/IrSjE3JP5VmitRjbgy23ZLeqaCSVXpk5zNS6dAYGeVev0QAHtFMnubmGZgS101MSrhPk7fhgewbhY9GVRmeacwKvV0as1NaywLJBrOrXIhx9A4mpLnRjSOkBT07aqDxzwsCv1M7LULREvWcBufbG+zSMVoqO7pgwNx5XgQ4z7ZiyJiBuZPv2yKuTD9kzCqUvJkbxsM0Ur88VCSu6n4JJItkxDIMsAxe4xOaz0d+jVe2ZcGNUXuf8ljpHbwp22zbJXytVUZ9TnWeiTOgGCE0VedftAOeJpBUcwE8u82NwNy4DfCCRuaV1Q3iGgvKKP7cLr94kxXwE7kpFcxHZjvAEfgcYBvK7UiyChfsEXFkr+l5fhLQOZ5ulf7em9PAByXOnjR7peFsu3x2/UPpvtiGnbDNkrxK4hCWhOKyaetmee7Jp8SKsUOXC+bOLg2YwozR9RBep1W+cqhedjZqwI8+pnKYg/E9I4wMGBkwMvCwZOBhuJ7MBT/ovbNly17lY2PEw5mBXOCT4GugsFgOH/6aVFY1yInj78iPf/Iv8pc/+GMDBFnh4TXAjxUm7j5uthrgx9/88OsG8+M+HrMH1RQLw5e/uCyHnjw0XTxmQbmnq0tOnTktV69cUjLXeWZjluuDOgb3q92pyZgEAgFMztsrjzyyF5K2GkP1frW/XtsJA6C7cP7CHecwfdi4/PRnX0hsfNg4h9frAcwZF89hh6tI9uxskX37H5X5QK35doNG59vqi2CE7kaNa+bOgcBIoWeRGuN8jT2AZavJADHkrx7AATSafCgzsCQAoktgRYc0LXhnJWb6Q6ZqvCcqURSTKYlVHbTI9eAwwI4gkhAUGwr7eujPMwA/GJxpTymswslKCWZNwydiqEzj+qkEXgzOCTzJAWQpeaRCBzqyhuL6MqpvWQFXCIzPk+mo9PZ3y+dxOFiAabC1LaN5WWgtiM8MtN/mhhZ+uVghB2QhcJLjy6H8OQCI0LvDFAHjwYtZnABDXGSWOCBflV8rpf03QCHxyHPJ7bK74jHVchO2IZiSgt8GxN6yRuYvaMBMdrwTlZPTJumWKPYRPhZ2eGLEwTZg0FtkfGRYTl37SIKf/EpctV7ZsWm3xuQAY4NBU3NXEuwOaPpPwPgccLj4s0yQCa8VUlmN4q6sVEwZsj4ka0Yf7h0DIGCWUb9NRlv7pAQYyWi4RwaTxYoOyGOTto3CF2RKyZCFARCQ3eNxAtSCCbrfOQbwAybnWHdztR8ACLTwXVnECeOqwHY4esqgXA9uV4HdDA8XygiAI3q8lMz5oZlIxSQNOTQyf2rsNTBDvyAjTQDc4K2Ck0uK0X/Up4FlHCPPHXM4Lq0wMvebnTzqYqp0gphUosARdzSADPklTh02BIEO5Z2S9Zmx23C641DSD8TuoOlyvpq15K2rVr4k793+FGiVxvrQvT6eHjZL9O135fW2M9IxFJYugB+54bsZlxRm1nz1lSfF3bhdbKUlSpIMB1MSMGePxSAHl9Y0J2dtuAYvbPBE2VYXUH9GGBkwMmBk4GHOwHpmgDCvLBR98OEHivlB8GP79v2YwLC0dJrOMlgtltFq9LcafazFuax7J21q2qm8kT75+HV547e/lR/84Aeq2GTE8jNggB/Lz9WDWpPgx4cffCi/e+dNqd+5T1458pp4S7RJWHP7bG+9Kj/70d/J3TI/DPBjbibX72vOtt62fdu0lOPQ4IAcPfqpfHr8GORL4SlXvktKywvVhDMj1m8Gkqgb0AtzaKgXv08n5NOT5+WZwwdk/4H900zV9Tv6exsZWS+79+yGjLA2MZS/M6dOnpI/fPip8mCrqGiUpk37VL3AQp9QI9ZlBnLP4fd/f0FOnDonhw/tV8DWUtdaZFiw1kez8zT8QPJz5M8Jgqx1rCYDJFf+aq332+jfyMBaZmBJAIQMEN0fgWCHMj9HEPxQjwBGWG4eQ/GeRemoP6pYH5wVb5nBQVCgntnNTspIZZxC4MMKhgf/zGEsIz+C11GpYbA/NDaGKiaDEJIC0OHCDSalr3Sjckzn16SwuAl8QC6ePSetpZBZgo9F3qXbMhnMkbjCOuHJPCkFy4HFbz0IejCmzcghDURwhHJRSZTT+UgGBSMOf5GrtQkpvxaQ3S34wYSJqR6p7DrTC7JP9PZnLQf4wS9juLVrj3hTN1dPwDjejTFWO5sU+6O385KcPn0aMw398JJPwEC4SezWAqnMAkMVLZuVx4QFLIPyimIZh2fLBIAdgb8KQRB97DQzjg9G5dy5j2Xr1iIphplxAr4phRmNkUFQq7NXGyXBj9wgmAEBK4ngkaBIClJiF3HcNzWRgjOzbng4rYATBlkgETAxfEX5aHcGYWc/ZHYoVkdSk8/SziGAR/BIH4gNS1HWZ4aghy6LFnOAtpg9h0xB7F8xpMB8haqtKLZju21dNwCaBMD30PZp1KLtEEEQV3+XmHHcdSiCIEnrWFIma13SdvmiXIhen2Z9UKrsb4saZKq7R9584y3pwOMwpNn6csCPajBozrnSYmuqk+9+++vSb
PUptoc6XxHqmODTRZDFwfMbXg5rHWT3DIZAac+agHE8BaBCu+BJU+KHBNqXiBa91sfC6N/IgJGBe8vAemaA8Lv24sWLcvazy7J506MK/KAn0EKgBgGCNCdR4LeK0iLqu7nAoorq+ZhgsdB2K83gg+6P7XPMqdQE/MlmrpM4Xht+s5kLfZ2V7sN62o5sEHo0Pfroc3Ls098qv5eXXn7pDsmN9TTm9TQWA/xY+6OxUubHX//p47IPM8qd9jtZxDQ81z0/KHtlgB9rf5zvZgQeFI/5x+jp6ZE33viNAvQ3bzosmzbvkdKScgXqG7H+M8BZ5hMTuM/vapXr18/LW2+/hcmAo/LikRc2NAhCz9Ta2lp1gPTfmeMnbyk50sce2yPlqIWQlatP0lj/R/LLPcJ4PCp9mOh88wbO4XfOKgm+r37tq4uCILwe/+L2iHxyoUepuehRiInDzz1SIw0VNK5du1gtBgjlr2CHDMWWO6X1127vjZ6NDKxNBpYEQDisrXsbYEjtkp6BjAz2aD4PnIk/Cf08BsEPRggz8yVH0YkgiB5OykRlIwnMYcKugR9cFAT4wfAlusVtrpTIpEfcaCgWNeE9MDZAAnF43JKKJxT4UWAHEILnDB0MufDJB3I1BB3H7c0yMHlZCk+1y+AcYJcMkJLSKrWdHgQeCHiMAfjQLvMwtizoYSKokXNxd7kITAwAM00w67ZDGkmwHqWlTDGNoTGrYbyYyy7h+7rclhq3PwEWQ4FMhlDQN49IDABS5oWAFLi9srkKfcCnxAqZsKHWVmmDjBfZK4yiwgpIN02IHyAIi+4pUwB9AezIAxKAMNF1He+RlRLA+Fx1VuluH5IbN89JaOSKRLorJT5hkrLiZsUwcZUBTUAQRKgLVIvdmcTx1sSxTAUApuD/oYEgajU5aHHLyWRQPhlolfqoZ1oijdv7pyIKJNFDN7bnawVWZAE0vqZ3CMEPnks8dypwvHrLR6TW0qDWNZu84vKkJGoBaIL1HU6aiZN5ElTgB/0/dECFwAe9QkKSlmKLU7FAUqMZsXts0gm2ywTaCMQa1bDGYYrurrfLNUeftJ65MW10Xpoola1ltfKtonKJXzwpR3//yQzrwz4jY8XzmkyQ5ze3SHVBg9TXIJ/tA9oYHTVgfcTUhSXBD6dz/cidjI4n5a0Tt+Xji92YDaRdBFiB0uxvKZc/eaFFAl5j9os6QYwwMmBkYN1ngD/vSUwYWI8Ri47LsWPHJA+/zY8++rQq+C8UBAkGBvuku/uGtN/+AhKOGtu2KFAOb6sdkFdqvK+eIewvFB6Rnu5b0tZ2SYZH+tTQvB6/1NVvlypce7CwtdiYF9oXLp+cnFQylMPDveijTQYGbk/vE/soLa1XklFFRRXos3BdTA5YbH+W+x6BqsbGrSgU3lJm94/ue1Qqwcg1YvEMGODH4vlZjXfvhflhgB+rcYTWrg8W5/r6wBzIgh+6lKPdjvtVgNz3G5xfuz3d2D0TpGehf/OmXSj618jZs155//fn1E5vdBCE+6j/znz08RXIJz0vu3c9MUuO1DiPH47zn987DfXNCrjyeAvhv3YUA//doiAIZa86+kblfUiZj0VnFDlqSj2yq6lozQGQ1WKApCfT4jSVGKbnD8epbozyAWdgWQAIi+EEP/TitQ5+6IXrDM0VEKZCu7gTeRKB2bQefI/LWeCeZoSggDzoHJbqTLVigTDIHAnjdS14IDGYW/eFJ2C+Dd+KQszaR+25AFgLgY/JIdBBUIzmcwk6JAXGQbC9XW613pTEdkg6RWC+fqpbbJh5RFhC9/9wZ+mPsUibKjJU1RWDWAIJrCzbw4p68ARAAyt8R6wADRi6d4Y5PyZDY07pAuslOTImLaYdYsofhSQV9jnL/JgcM4vZo7FJuO0EZtXTyYQgyDRjRbUKtS6ASakCAEATAZlyjSkZrInEpIQ8ndIRSSqjcPpZcByB0k3ZrQCeJCKShvxVwmaTG91hlZ90oBTSYaDNYF2dHaP6znqbWNzwOMHY3Z52sFy6pdYfgPxVQrwCr4uhSgmbw8pEXGNuIJ/53ZJCTWkqDjYO5bwmXQpy4DlAYIOPNLmXgRkJLIIQeiTTpRIf5Z4HpTeWpzxBrrZp7+vr6ecRDe8TWfBDsYdI8egbk0RTgThRAzI1AzDp14ozBETG+b4TgBj+GGQmmei3gXAXgg4Eo3TKag3FekFnBXsH50axqUmq8qDFheMLXo9YKk3igEH7mcun5MKAxvowAfqi0fnzZdvkYEFMrv387+VSW98drI8hGJ8zaHT+g2efBlnJJu3BIbFEbYDrnCjyhCXcekviaXjkjEdwXEokmfX/YFForSM9NSXdA+NytWPGmL4AzJSKAD5jhoHhWh8eo38jA0YG7iIDcxkgc39n76Kp+77qrVtt0tM7AgDjKUgvaBMT5uskEgnJ5Stn5J13fiYXz3+M2Wmz2RKF+L1+FPJZL730J7J5M2WWFgZS5mt/7rIEpDOvXj0n77z73+XSxU8kGJr5LeC6dvzO7trzlLz88vfuKBDMbWu+12z/1q0rigVx+tS7AHWglzlPVFXVyO7dz8izz35bgQYbwReFBRTenNfVbwGodAnm6BchEVNhGKLPc/z1RQb4sUhyVuktg/mxSol+SLsJhYLy5jv/ppgfh5742vTvkFEwfkgPKIbtdvvBfnhR7cCbb51Qk/U2MmNRfce9/57kgh86gPfwHsUv58j17x1eMz6y9ymVBIIgVtt78vWv/xEUN+b3I3LbLTA8n3397MdrC879tY7VYoDYrA6xp1AvnHvztNYJMPo3MrAGGVjWJz8WhaxUOE8SPb2Q9tH8F/SxsnAdypGIJfgx3qExIpxuzcyc6xL84Mx5rm8L2GHFMCxdpi5xJp2KSZIcjgo8qBX7YzQ8NJ0K+lk46mjsDemEmFcSxaMKBDEXAxUpjEmqOyWfHH1TOrriUv5sofSDjRI4M6aADx300B/ZRnIkKG23b0hz8z6pAmhAtscEzMEpZ6VM0bEOAYSJNKSBuIFig5jRb48CVxhlHo1OqV5kIxf80JexIENQYm6YfXEFikzBKJ3sj6Q5qozXe63j0hW9KS/ad8ByxKfGRhbIzieflM+PHlX7FIynxR0MSwcKF7XQABsHQNDcvA0FhIgUVVRILIQ9AJUlb1zjs7APSoUJ1KBcAE4mYwlJ9MakvLJYOjt64MgelZijTNJh6F/mDwB80kc7Jv3Y3TGnRwrSVTB21wARngsOLGNQrmpPUSExC+UjojNJ6MeRX+AVRywJD47ZP0YEPZScFYLr83njEHI8hnGH+qQgNiADFTCI97WodZQROoLgid4+DdudPfky6hzR5NnKtHXIAGHTQ/ld4sm3i68APi6jMJRH0YizXvM8MRjW58mJMx9Psz7YNv0+/qbAKa6+K/LGWx/Py/rQwY+WmmeEfcoAACAASURBVD3y/KYDYgYLZiTr7ZFIaDMK+vr6xYkZrQFvWoEfFotFMUHoAZJOz9Au1Q6tQdgAxm1rLJTg+Awby27Nl01VfuSZ7BojjAwYGTAy8HBkIAmPjWg8
b10OtrOjQ+lLk03B2V3zFYv4m0Tw4x//4T/KjevnwIyoQPF8m2KpTkwkZKC/U27fvowb959jEkhCvvvdv1UySyuVUyQIT3DiX/7lv8j58x8psGPLln139Hf04zcwISOudN1bWvYuuz+CHxcufiq/+Pl/AshyRh0XAh3FxQ2YqadpxI/BJ21oqE0BI93dP1Hj+c53/3pFYMu6PPAYFGfXWi2Vcu36NXnu+ecMGawFDpQBfiyQmFVcvFLw499/x5C9WsXDtGZdUTrm/Lnz8tmZW8rHiiD8cnys1mzARsfLzgALyARBqIhAxuKWLc3wwmjecIA9i8uckHIMNZTt21+cvtaY75ps2ckzVlwXGWBthyAIFT9On/4E16tbZdeuXXecwxZ4fjTBv/YvXtqm5O71cDlsUo4JoGsdq8UAof+HL+/O+uVa77/Rv5GBtcjAsgCQUGhQgj0o4sKwOgPGRtqRJ2NDmr7VXNYHGR808NZZIfpOsYAMQSONCWJCwT8rj5XErH6P2KUR0ksOzOwn+yMC02+3zauYCnoo8MORBVacBFA0SaJIpEc+v9iKm+xyiQwGJa/ttmJ/2LKMj7lJ9TqskpzqU4ZgBBgU2wPgB6Ws3Hgk64P+3gRCdHkrSlP1Sodifzye3zTdJJeXmGcK27rfBlfQpa5yPUYot6UvJ/AxxY5cMGNP0R8ipRgmmc4xqSnV9PnUWCDB1bJtlwzevi6Ra5DBAoATsedLIf4GezulxBGRREO9uLfuljJfgXRPAlzpKZRJUB5i2J90Hvwv8CXPmARwQvCDMQjmiyPPLrHRKYzphoz4C6V8htQBXUUN7OG6tryY8vJgEPzwZyxSkQd/kt7bMmbzaSBFFoRQKyHSKPwDF5EabOeE6TsBDOXnAckvgiBOj0MxT9iLqbJMyhrhQeHbKqH3fyumqzck3aD5eLAtnTlCs/LR9i4ZJSukslzyUk8AFPlUGqo3S2dkQOs4+z8ZLZ1glxSnqxUAQubHNceYHL19Qq1hqtFAnD9PlcuB8YR0XTwhp79ok3AkPsvoXAc+uM3h7a/KgdrtmKWrnYeBqaT04di5wRwaJ3MIcmUBr1t8AGgYI4Ot6hHIC3Tcl/VR09Z/QP+7HRZ59bEGObKvblYPNASz07DECCMDRgaMDDwkGaDJ+NSk9nvGYj4B5/UQvOGmLjGjKFA8L/iRwd03Za8++ODXCvyob9gkf/TaX8mBAy+KBxKY9AHp7GyV3/z2x/L793+h2Bo7dhyEB1idAiRy9aoXupGfq2lNWSoyMwh+kFny9T/6n2Ag+Q01RvbXB8DlPTBD3nzzJ2qdLVv2KHaGGdc5SwXBHDJLCH6cPfOhVFfXyuNPfEUOHfqq1Na1iAOTTBi8Julov4b9/pUc/eQ3uHY7ptgfC4Etc/chdxxL7TffX+72y8kn+15qPfbpcEDC1V0IBtClpdL2pX3fAD/uPPS5E8ZWY8LMvYAfhw4anh93HsGNt2RkaFBOnbmo5BG3bNmr7qUW+t7deHu/8feIv7179jwtf/h9vxw/fhK/1fUbDrAfhxrD8ePHxZxfK7t2H9oQbNONf2Yufw8JyPK4Umr1ww8/wDUrJtxkfYv0VggwVBW7pKLozmvZvMUuEpc/jHtaczUYIIb/xz0donvaOJ7R7gfvqRFj4/uegWVVPTODTgkO9YgHJtL8yw1KXo0BffVAAoiPBEQIfnA9/TXX98ZcajnfZ5AJQkCFjBFKaU2wduF3zwI9uB5BCgbBD1fKLumURQNCIH9FBkj/0HX1vqXRLu3OKUnfHAAoYJXR2IzOH98n8KEvc6LwTsRYUvDJUFuDEpr1/eBzHcigvBUBiB5npwInGPU2jY3C5w56npitan1zOiIx06SU2DXwgtJXOvsjF3GmP0cEDBM3wI/c5a3V/TJyulV2O5sVwyQ+nBJ7UYFqOzwxpZgfjJG0SSx4bocRFPdnNMs+KCkphkk7wCcYEQ5B54+FhpA5KNb0zDhUA9lwTSXgA5JUIEQCimW2PgAi0ENkkOXBUFJXCIIJvbEyqXBMKRks4ueP1dXKr9qCcjwRlsez/lH6dpTByg2fNw4QA7NBCwH0FE7B8L1UmZWTKaKDIYrV4fNIY81WOTd8GqbcKQmENFaC8g4BgELwg74hTosmKTIV/XTa/Jz9qXXwqLNGyGix+yol6BuV42lIft2+OT0sSl59K+aQTbcG5Q8db8oxEwbIU5PSatnQwY/iykapsdeoXA0P3lZAR0GBH0BgiVjDAM8AfnT1DaLoFYSxWp3a2uP1AACBlBjADwdkMdZL8Lw0582eNZ1vNokJFwhGGBkwMmBk4GHJAEGG4GhYDTdQVKS+c9dDEExI4XrCZnUCPFj4EotM18GBbsX8OHz4m/LEE68CmChWRXbKM3C27bPPfgPynp8pxkRv7y1lKD6J33camUbHx6S4pHpevw4CEt097RLGRAYffseplzwKNmRvb5diflBWi+BHZUWtmi3H/li4pyTV5ctnFSjT0wu95MjokgUDjlcDc36lAI2iohLIafyZvPzKnynvEnpj6MGCC30/OG4XJg6889ZP5drV03Lt2hkFtuTn67ryGbAmk2of2m9fUbNUkzBwtYDC73JCIhQeImTDWPGa2uYMgkqh8LD0YdwFAMPIxEiBeUnApRu+HPr2xcUVkCbbPu2rQhkygj8pyFV6fcXz5pPt614tQ4NgLrs8qv/5pLu4v7pUGUE6q/XeZMvWwzl9v8aggYNB+d1vfycn33tPXtxfLEeefmxeA232ee7KLfnJr95XE1NefGybtNSASr4Bw5JzjownZiZ+PahdXQn4IUM3hcwPA/x4UEdl/bXb0dkJs+EB9bvA71ojNl4GysuqIdVYJzfbbklPV5c0NM1M8twIe9sP/5orly/Jnr3fU9ceRmysDBCQ5XGlf11b28eYONQpW7dum8UC4XXH0GhcOvpHlaS+NTtXivJX9WUe8c6RxlrtDK0WA6TI1Lzau/al728yD1VmQ3JsXZ4HC9+dzzNcAhpzg8ui8K3QQwc58s12MeV4gVAWi8wQAibSjbVRE56WywIjq9Ch/TCR/VEQLJBIocYC0dslA2ScDBB4Zyg2CN6PjUWUjFNFS6kU7CpFgfuEBACyuH28Ib4TBEnaCV5oSBxvZvXQmRYuU0bGM7MLwYOT+WB/jCv5q7Ioxg5drNhkWNwZs0zma14UejtcFoNfiAMeELlB0CPjnvEbAcVFknFIX2W9OiKJpFwa7JAB24B81fS82tTlgywUWDJ8TEM2om8gpDxF6AOSHMko/gslscIRmKR3nBBPZheW1ahtJ6wwVAc7wT8Bg1E3gBvgGSzck/2R8Y6IaRTSWXk2iWJG6GTSDQYIwAOwM2JRs5SbqtWxmRuuTD/8P7SlBDq21LdIRXvH9Oc6VxorNKCxPbh2NOFTbRO00KWz1Pso5BCooIeHYnj0I2+QzFIyV1l2EBkf0fCYYnywjcGe2+IsplyTll+dVVLgfgZtX1NtcJ1NjzwuGV+1TJnGpLX0TtbHN4aKZdvQpJR3Dsob10/IJfvsb6dc1gc
lr5qKGhUIFIu2S2xCA8BGx6dgRA/gDlIlV2/ckvbQsIzh/Lk6EpFKnMtJUJzMlmK8n6eM0FcqWzL3ONzL6ziMz0990SdftAenm7FbzVJV5JLHd8CQdo0vAu5l34xtjQwYGfjyZIA3NASeg2Htu6y81L+g9u9aZCWZnPFBm6//KfgxFZdUyiuvfF/JWzU375m+Oddn2HJmWyEABLe7FMX5myjgT6iCPv9OHH8HLI1jYFo0yTe++R9mSWNR6urGjc8Ve4QAy5NPviLPoIDlhWHk0099XZmyN0Bqi0bnBA/0/li4VwAAgAMCIPxto4cVgQUdZJh/XzIokp2XK1c+VW/vP3hEDj/1mpQAaGDMnTHM30IyWV588fsKJGLQ7J1gkb7u+HgYJq0fQbP7N3Lj2ulZXiVkr9TW7ZAnDr2sQCO/r0iNj4DJDYzjjdd/DD1ou9rXGzfPQcLlU+nvuz3tr0JZrv0HXlL9E8QYHRtVzBfKce3Zcwhmmn+pgKjc0MCVEcyU/YWcOPE+iilPyGuv/Yd5AZBZGxovZmWAk0QM8GPpk2JyHuncpbda3horAT8irTcU+HHkmcfm7SQK1v2xk+fkR784Lm6vSf7mh1+XvVsb5113oK9b/vu757D+TYApm+RPXtorpeVV865LAOzv/vE3EhnNyF8d+DN59uCT867HhefOnZP/fPSX6v3XdjwlVT5tAteCGxhvLJoBgredHV0Ana1SU9O0oJTjoo0Yb677DPB3n75VZ89egjx4+4YCQCjh1tnZLdGYG5MmGtVkjLnXI+v+ABkDXDIDPK6lpbWYLBPAhJrr0rJlq+TJTC0vmZ6Sy7eG5ecfXJN4ckYCi/6n33thi+xumn29t2SH93mFVWGA5MMDJV0r5kljQs59PnyLNjduGpBohlo3Rqy3DCwLAKFReS7IEcjPaEwEFuNtGgjA90nhVuvxD6DGXBksvia3QFtXq6YTFPEEipQ/BeWvGKnC2b4ZqThknOCCbvPgg9uPGfplmF3Z78WN8UfS8/FpkV21Ej5zXQo6OyB/BaYEzMEZZVW4AAbYQvYHo9gUAlOlSDIO+4yZaAptwzCbkQt+kNGhAI5kz7T5eYNXmxlhCjklAtN2luF18IT22NxmKmYWKySncv0/6CliBejBR4If3CYO/wx73CXJAptcqQ9LBEyE0kSplHnxBYV2GBn0kQSrg0HgIzyZJz7z7MJKd19Crrf2SP8lzCR9AjccAHbom8IWvEA2zPQhwT8asjJ4I8F5spTDcgJzsoFl0WmeFKdpRLqDAbEVmcUHloce02wQyF71YMwEMSrhS5IGwyRg9silDHw4TC0oPtiRj7iER8EAwszWaFQDmqIAnIrZHtCbnoHZQANZIAyyOsgEgbCVimKHS/mLlGaKpsGPkUmcOSYgI0NYnytlmSAlngrJB/hByS1GSWW9emRcdA0q1ocud8VllLzah1R8iKLI3+usD30DPM5lfehgC/fLkj8GVkyhYnr0AZRyQYgtnsigiDIDhIUjYOZAXq0jEhbfpA+zbb3qBmI9xDhkz85cG5TXj80wYSwFeXJwa7nsBYiYJfKsh6EaYzAyYGTAyMCiGQgGwQCBFCep3V58LztwDfGwBEGA4qJyee65b6sh54GVp4MMOiM/DclMsjwikHcsABjiwSy3AkgpFhRYUJBqlk8+oYzUZTAuymYV7UeCA5oJ+cn34KtWjxv/JjBO4eUF+ZLDh7+2YH+8CYvFRsHCHdb6c2PSApi8i4EfbIxgRWfndcVS8UAqhVJdZJwsFQRBCDZQHqsA10H6JAF6iZw48R68Sv4f1SZ9SsgocWEfxpGPq1fPK4mujvZLitXx4ovfVf0y6PnF5WTDcj8Y27Y9Ki+88C31/PNLp+XkiXdkZPinStalKPDn09Jpum/Jo48+dwcAQsCqp/uWyjnHRADEDpDFiOVnwJC9WjhXlFHV2fUEdWMPiAWSC35UbNkmhw99RSx+L7z6JuGfNyExfEfojwMdN+RnP/o70cEPMj8IdMwX7310Qn76xmcK/Pj+d78mzfVV8647jnuDueCHC6DrfO1ev909C/zYD9bWQmGAHwtlZuXLxzHBLoTjVRQoV785RuF45blc71v68BmkZxk9KzdSJDGJY2h4EBLpZcbv9UY6sHP2hd9NRYESdR05MDhbDl1fNRKHzGswJsNQnGGw9sFIptben3U1GCD0/7CYZk/anpNG4+V9zgDZH+FMh8Ty+lF0vc+NG83dcwaWBYD4gKTqBXgHZrN3jcbUDabdbpfiHKNzJW+VZYPMBT9y9W35PFcKywkCgMui0b7p/UFvC3uhV8yFPuXRUQBZonEU1wl6KB8Q1MJd+PKKjAGIwCzGgi0+GQF903wVZxj2iEV+hg/yDvHgqAI9xrK0Aj56ppqlBBP54ynCFjgvs6wPyl3p8lc6u+NykXZBYAl4pB6z/wl+WGzDYsm4Zp3Pij0CwISPZHvkhs0yLlP5Xg0EwRsEOJIJ+Hngz+wql/Cghg7uGi8Rs1N7j9uzbE6/icwUDUldABeQqDlBFghlsI5dPCd/vPcRyUfRorDaJ8NdYTVTVBefoFY6952yWfaqlHCEobBV/JU2KZEKgBCaRFViql1MQ9WQumqScCwIwEPrkOAHg4CIHwAIg5JWl+CAPtA0LLZESslcOdFwkVX7kiW7ww9/jSALOWBw6EGwg0wPSmCN9w/hWKB/8GzYlbesXALJTXIJ/iKlTUVgqGjbUYIqCsAlN/Q2ndmm+dpcWwxYYlgu9GqzUXXwg5JXP1Ssj7h82HFOGZ1LycxxymV9sI8t/hbVFVkmBGgIwNTkp2Rg3C2dWO4Mw0AeQVmskdGIYtPcgiwZX4PPhHMLRapSiwSDfZiFOwXZkvXz7ed2FEzLr1ESyw6QzAgjA0YGjAw8TBmg75I+MaO8vFysuB55mILAQi4zkL8RU1PazRhlrvr6u+X0mfdVwb0ejI1mGJLbwH4kkEHN487Ob8rrv/6RKsrzPZpBMi6c/0Q+gscGr89Y+N+2dd/0hI/c/shqIGuCwf6GR4YUs6QdxutkWWxu3q2Ak8WCYA0ZFJTLiuJahOMku2Q5cincj/mYFpT3+uCD1+V2200wNY7IH//x/6r05ynTlcTkEbJbyPCgWfvRo28r9siePdrMcLbJa9NhFD0oi0ZfFZ0lQhCjpWWfMng/feo9uXDhlBw4+KpiozQ375Xjn76p9r0Nf5Qf02WsuI8pTJRpa7ukjgUZJJs37VWSYUYsLwMG+LF4nrxulwJx0/gcEtSNxe681l+8haXf5Yz+Y0ePyY///keYNGSXrWAq3+q4gj/ca+AzM4HvC/1xDJJzZ95+XT49dkqe3VMpXb0D8sa7H93RSRyT48aSefLWB1/Ijf4R+cbhJum48oUMtt+ad93O3qD84mhbdiLXJvn9yct3rMcFA2EySm7KmbZeea5hp9wYasMy3L+kxsRb4Jn1yPX/re2Yus8zmB/zpnNFC9NgIZHJSNlBIzZ2BvLzCxQbM5WeH+B8WPeeXkpj0aQqjBuxsTNAkJZB5vJkOoWJUTMTTy
nzXQ7/jye3V2FywcwE64DPLj7X2p8bq8EAgWMx5iEb3+Wr+SlICgC3zHVJZ+/rVrNvo6+lM7AsACScI2AWi0bVDSaDYIgeOvMjF+jQfUD0xzzxyxQYAYoBgg1pVh6001Qdr+DVkGt6znYt8FpwlvokbzIhU2abAj/IBlGACKSwAiUwNnpmq3Rv80hv6xnxgCWBaYKqyK+zPkoqaqaNv/Wx2hJd0LrcK/WbAGIAtNAlrTQ4RFuLIAXmDagXlL+qTeDLFTfCBD94o2ABxsJ17AXQxEKQPcLXBEE0MS+9N/hATHklGULBHTfRar2Zt1Tf9BehwXqNu1aBIlMp7GfW+ySNC9DkRFZ7Kmc7S6BQqsBs6R4cUfrIoeDtnHe1p2n8CHDQBYECyTNrX3zMy6TNo3JiecouyWY3ZKXKxTcEI+9sC714rQWPEiSxENEhnZ8hEpzwCYVHxhuRk4Fbiq3RtGuzOAbyJFY6Jd6JaqnMtsCH3OeUtGLQ74PPKyFz5asqFm+39hjuHhJfabPI8XZtvazfh9oGzwlyRIdmDIV0hgnBFoIf/fCB6R/UGA46+HHIvE2+1QkmD27i/vFim2Zyvgj4wb4IzCh5LvRHwEZQ7MhEC6RDohJzzPyA3r51WVopy5UdJwGQmkIbLrrgjYMLrzB0J/OtERSa1n6Wgddpla8cqpd9LbOlAQpxEcD3jDAyYGTAyMDDkoHRsTBm8oMBAmp3IBCAVNTDO7uJYASZG5e/OKVAc53pQDYDC+4EMjZv3jNdlKfsE2Wm2m5fU4wGmqlTAoCFeoIHwdCIYkYcOPgKLok0z6y5x5V+GZSM6uluVf2xLcpNETigJwkN2ZnbxYIz7ygnOgZGJMEUymeRJboUa2ShNifA6Lh+/bzyBSGrY9++pxT4Qa8N9kVQguDEE4+/pMaqAAsAE9u375/VpMVSAB3oJ9Q+6CALwR9KudQ3bFYMkiEUVTmhIy8vX0mAlJY1KA+TG9cvqO10CS82THCI7JEUAJiq6m1Yn+bwuVdyC+3Rl3u54fmxvOPv9fvhSaOxmIb6Q/g8adfJy9t6eWtxRv+58+eUN2PT5hYZvH1L/TESfhvY4Inpx+GJiJz77HMFfuzaQub7lAIlPJYpBXjoj+CuycWrrUqOl+BHbRWv9hde99PP+xX48dLjTaqN+drk8o5uSLX2hWRfQ4UcaNAmIxH8YMx9PNUG+VvEi837DdkrlQkjjAwsPwPqdxXMUsYE5LiNMDKw0TJAo/Nt9UUwQndjkoE2QZr7aLVA9nUd1D5WgwFiN2nXFxvt2K7n/RkzdUPlB9XSjYUrr+eU39XYlgRA9MIt5ZdyAQ8+J9hRClCAykYERvT39VmZumm67h3icoCCNqD9wKp13FYU/DELMDAzDHqAxDHTj3M5Ha4iZVY0HmXBeaboPB7T2CIEL7q3w2y6vVUCZ8bUGrwQ14Nl/Oqdj8qVxB9QtZ/JSweKA86RSwBAnlYm55L0KTbHtOwVnrPthF+Tv+KWLTG0BjyArJOAzSTjYZvm0wFAgxCIzuowu9yiG6DrJuf0AJkGPwCSRGCW7rZpwMm5opmBuSCdNGWG3BhmfxVMRSUcn5ICFAQmhvqz+6VR9riPNCrzYZYmwR4yXngT0nrrmjQ/8oSUFbqkbzgq+RMAPsYdMuUaExtkJjhDimEDKNKDp9F4p2TawXBpv6GWT8ec15niQrBCtHHyeevQ7PX7IIPVB/+VclMAIEGh3NRNPGa3KmTRzOgsISdKcwmPAJjUczxuraqFfJTGiCGYsQuMEHqAMDoxXltwNpMiARaKCedLorxAATH9YJXkSl4R/CjpH5S3j1/QvD5ygA+2qTM/PN1oBwymUce4kuAaimIMwJ3oOZILeOjLCbzwvWZvuYRMPWC+VKgxMgh8pHHc8tfZzCkbfuy31QXUn34JgI/vugxzVuotvwC5BJsm95EDNpYZOTDOA+23NPezMDWV96WQqqDReBcMOznTn8XuqsoqXH9ov6nr8gttiUGRndDW9oX85jc/VkX9OH2xEGRUvPqVP1MFeYIeehBgoMzUyy9/TwZg4H3p4icAgbQJGwQPKBv17LPfxDrV84IRBFyGh3vlnXd+JhfPf6z6Y3G/vmETwBYCJ68qk/DlABkEXShbRbDEikkrOnNiiV2+420yLWKQ1qTZ+9hYCAyUvYrdMddonO1XVjbCr6RMBgbOYf12XB+NzWKrUDKsrm6zeNyzhR05S5DSV3ok4b/CKCqqkC1b9ygApAsMlKHBHiVRxv2nFBmNz7u7LitQZkvLzmVJfN2xg1/CBePjEfn1r38p//hP/yzPbfJLobtKKG00XwwN4Drt2IUNb3g+376TrRUIaEAlwcso7qceRPC78umnX5HtTz4thV5cNyddQob63Mcvrl+XwRvXYDxfL88f3LboUAhY8B7klUO7ZXeLNmFqvg26MQGpo5vMab/81fdfnG+V6WWFha3yxY2QvNBwSL5/4GtqeQ8milWqO8PZz8kIef/6aUxCergYgIsmwHjTyMAqZoBqEUYYGdioGSDA4LQVqL/1GA+aAWJDLcqW9hn+H6t88BOZ8DTLfpW7NrpbRgaWBEDYRhSFeYIbBDkYxWX+ab3a0REUxu3Qjc2yQTh7ie8zlHF31ieEJqU6EKKzRMgIKSpMAgzwTrM/gvDwID07GI1LnWpFCx1MyMsbFVPEKhNgSPSPdUB+qFUV5wmozJ2rSJ+L6scbpOvzs7h0hrY1wAIVBA2gd6nLXFG2inJYfK37b8w1OC/z1Cq2BkMBGDAnzw2uPzQVlMqsf4c+Xq7DsTL0tmmWTqYHmSSMaYN17ADBjyi8UAh88Ku6v6dTmZPN9f8Yam2VZNbbRLWBG5DPPjkqTbsPgvqnlbU1BghuN/Ixu8xvxw2EFpQWKzQXwwcERf6gBmwQSGDQ14N+IIzOdIGSfYoOaeVyshxKgv0yODWm3mNU5BVKb19QPQbMWG9oRMlFSWgAHZeq9vQw1yLnHUOKLUHAYhCyZXxfrY/g87AFM0kBdDDI5Cgexs0gGB8EG6onArAA0cARvuZybsO2LkSvq21yJa/I2mkFmHMUcloVCxidVwU96nxLQD8+LiPijaGIBrLM2Aik0uBNw7ERdEnAfyQKTxLlZ4LjM4S1GSFrD5YHpBw3X63Dt+Q2xuRq2ic+r12BIDEwlvrG77+cger8LoOGcCNjCRkdnw1Hu+BNU+J34LzRALa7bPaBrE7Ao6AgDXArJQX09UHwtR76c2OZkRfj3Jj5nKRS+ZjFt/aU7gfypZDT6DCYHx3t7erisrq6VkpKStbV99fd7j89QKz4DSkprQKYADAiFVHsFpp3f3rsHbxnk8ceO6LYHDooQWCArBCyQ/71l/9F3nnrp6pbMjiee/5b0tS0fVGWAmUhqJtMnxC9P/Z58uQH4nQVitPpngYB7nZ/Vrp+DLPTR8C2SCZTQl1yJyS4CIzk6s/zNVkmfJ/rDY8MQjIoOgsAsUMqzO0uVMbquaG/5vv69Szf98JfhbJWpaUV0zJYjY1blexWLDYGVsqZGSmyZkqKbfzP2
EqPYe52EcjUffTWbyWF68IOn0le//DCgs12dcfUNfp3X9oqLTUzYN+CG2ygNxwOh7ggX8tCBYHIUAjXwbheu9/XZJS68UB+tr5uq+apuEAOEyMZ+T3es9us4rPD1xDAyXwxgc+fHZ6K9P4o8mBCZru/0wAAIABJREFUnHexzwV85tAeJXsXa5P9FGJyHNnqBDfcYP/zS6AlM9N2ixLx1cIAPuY7MsYyIwPLz4DOAFn+FsaaRgYengwQYIjEkjLKyan4XdWDk0ILPfAXxuNaxoNmgND/w2Na2pdvLXOw0fqm/weDsqZGrM8MLAsA4dAJWujeHe4E5H1ww6kC4AdDN0YnEKKZnFvUDSa/V/hebugMEXBHxBLHe2Ay5IZnKqUIG5lJDUghmKBYFFwGZwxYn8qlEGSO0LbFiwI7mAX+78N3fXT2bD8r2AP91ZiV+ORj4umvE3OOD4W9phhG7jYYolvFnB+ToRRmT+I8tZs0kCOO93ohCkVwwu0ukdiwttwCiSdGLkASw/bmBTR7lfRVThD4yA3KXzEavA0w/EBtHcX1+ECPFPi0facsmMZqmV2c5jL+0RuEwRsQymD19/aKt7gKX+pO6HrbUNjgDHqvFJXUY62P1bo2aP+Og+LOYEG/MN8s3eaEAj6iOByDg3mq4E8Ag8V9en2E4H3BIPjBqA4CrUGBRDEiMHFNAScANjLhKAon4OKAKULWDmWjdCkpgh962PpSsqP+gJLCqoAUVvhaq/IFod+GDpoQWDE3NSu5NAb74riSGBoBm5JHNyvWRx/Aj1zWx5Gy55Vk11GwUiqujcDhRIsajKkTYBlZH2R8NOGGHFNJpbcLYFM1pMKyDBl6iFh4/41c6OAH+0xmmS31NsxktPrkOvoYMQXFlsmT3lgn9ntEtmBmar7XD/ZHnnR26JBTdgBr/DA6npR/+QMAoc9x1iU1Jo0dH9BHN5fLD1/dKqUAQdZDsMClR3IMQBIYIAydBZI7RmOZkRfj3MA5gO/5XDAw9zOyEZ/39PRI2y1NvqWxoUrKK2YYeA/j/hLUoIxTZVUjivpQjoUZefvtq0rOiowOyjUxDh/+6ixGBFkhex95Ts6c+Rj6/m+qdfYfPAJPDLBbF5C+0vND74s/+f7/rsADyli1374iJ2Ccfhp/P/tpGzw9gvLqq3+O65/FqfMEAwjGEIyiFBbbupeghrMeCwENZIHQB4R90tNjvj7nY6LQ62S+NvNxDURZq9q6HUoeS5fB4n7R4+Tqtc/VkGpqN6tjZMhfLX2EedVPBnlVWam8sr9atjQ3SaVfu1/oCU2o57mPV6+3yvsnLkt1RenSjW/ANUqKSwH4uZSXzuDQAO6fcJ3qmZlAtFq7bIcvHO9p1lXkIqC5A8u9WFxXAzYGY2Tg4cmAwQB5eI6VMdK7z0AyPSWf3wrK747flFC2lshWKgrd8u+e36xUMdYyHjQDhP4fRqx+BuKZGbn+1e/d6HGpDCwbAGFDlAiiufktsDwYOtNDl7OiAbouhUUmCMEQinSMAFgIZJki3E5nhXDdgFcrunp9tlksELUePrMTaIDgx0Q8CyTgZns8HpETfaelDzP7GeUVLO5DyalJQ3EHbANSmuBN1KBcbEeBnhiBei+gyTDh2Sikl67K+6xqa4EJRQQ6dMAD/AO1nMsYuhl6dm08QDoKUYHGYyUOGJlrTIpJNHjNqRVm1AoABxyDuTcTWjGXXhexIq1z+n8ETLUw0u6Ri8c/FrJg9u1/RHwAghhVJfhyhtfH3NDBD86a84AcHsbszfHBTglA9oI+KYmhNG5AYXiOFHvdZWKr0HJdWFgoCc6m4i4CcAkCyCB9hmAHWRVkP/jNAFAyMC8H3BQyTYlzDM892hgyg04JhoOQDYtj5i2kO7AbZEpUuZBjhceUSDdYE8o7A5Hp6Uc7cTGH4zIJv4mstYpEO8PI3xZJh8Hqgd8Gg9soUCXLSCHAUQtgZbDn9jQrI9DiV34fCvyA/JYOfmwtewKOJXbpv3xZBj87roAPgh4MAh86+EGWh9sBQKm4UjE54o4Rieek1x4LKEBEz8OQdRwKXS4FipQUwxTQb5ZgtwYg2MAKIRuloqhG/JlKNWt2YiIs4+5KmfJYoZccl5p1Mls0DZmVeCItbX3aucu8FEDHvL48BrBsZlaEStg6CYIfk5DfY6TTVjX7mfItnDHBi4bcZfkAWtNpSpbMrGcsy0CaxsjLRs4BPwv8nHyZAJC+3j651aZJ6bRshcxhiSb/tE6+tlY0DBbsS0v4q6UxHurrWpTU0z//8/+tTLs/Pf6ubALjo6G+eZoRQeks+lhQgkoPghAEUPjeQoV6fo+yv0BhsfpjfZGACEEAxu/f/4ViguzY8SSMw/cu2A7XVUwR+IZRAiscHpJwaFAyAAqWI5+l1y8Xqm9O79Q8Tyi3dbcxlxGSuz3ZMFu27FGA082bnysZLL8vAObHDSV/RWP4HTsOqmVGLJ2BnHkMUlnskb112gQlsgZKvdpznTHAxzGwcb/MQRYb/X6uX7sil69dUyyQtQBA4phwZoSRASMDX54MGAyQL8+x/rLuaXAsKudvDkoQ9UTWPRhhqGFE43VrnpIHyQDJBwvanvGLfWrxiUxrnoQNNgAaoEezijUbbNc2zO4sCwAJTc7cXBPs0P0/mAUd6JhhdWh+IDRK12WxCHQQAnCgOJkLfnB73bNDMR3g/8Gwx1Euh/RAaiQk9PtIQkKKElVq/bS2zmPl++XXWQCEzA9ljn2lVRWtrR2j0psj1zVWNSnmq5MyucWsZv7zNYvg5S2bVCFdDxtM94LeUfBL2qfBFSn3KG8LDd6YXnX6yQX6Y5wJCr0xGIrg3z6znvK9mC/+f/beNDrO87wSfLAVUFiqgMK+bwT3nQRJSaRE7ZYtWUvsxI4Td7rtnpnTTk9nOmfm9PSv6fzImek+pzvdSTrtHjtxErctjWVRkq2dlEiJ1EZx3wkuAIh936tQ2Obe56sX/FCsQhVWQlQ9OlShvuX93u+tb33uc+9VzwtrRgEMs709dXLsow+lu6FRWR0ddfV66tRdA9MFkV1epuNC0/NQMQB2QkMnWCDQDy4eHYJZPJgf4xbzhsbxyc5MaGbfBplo1n0TQANBibQR+I4AG2Ein8FE/8SUJc/F+b4Rp2QVpkh3PWQ6QHcfxfI8JhxxPC6s/W7JH5cySFEwsrLwIgcAw+/KVTNxhgsHQMrqZLBLALQETMz5WwUH/TbSUmEqjhktkBRj0PtDwPygJJULrA/L6NxifcQB+mHQ64POMX1v/VryG3slpczql64fYH3wb/7u3L+4KcxH4mag8ZauzxhtmZTkIvwWNkDEfuwT7GD/G8chvQVT9IFGq0/cHzJAWtBGUVmZ+Cbrof92Q1kzaUSfVkikoLKvsjhDHt1eNqNHm6shNbKCtDHj40ODMSahxocFJszsCTb+PTERp3IpJjiN0rbBy93L0xZ7DKJtL9rloh37xW4v1HaXYxuhtruY0wwQCAWlr0yMjo5KExggt241SGpqGiSwSuHh8OWTzJmYmFDTcrI0U3CfICBhBwIc
8LGgaTeT8mQlMAnf3t6gAAjDGKcfPfa6yjZRCozBBP4nH7+lvhbGAJzTo9kefUWY5D9y+AC8RW5Ic/N1BUBmCyeYGMXFZVq9TskumpIb4/LZ1mN/WttuKVhTCPN0wzSxAxscn+DgGJHxQaaI+o5g+6HYHsHrWWMQmo7ONlNxT1+DfaWEmO4HxjQ3rxgJ6ZMqR1YBP5Y1a7ZFNIYPtd3YNBYzjamUUjg5pa/6GJXhGb8Upt8Xzp+Wa3VXoU7bK+Xl1jm9nGNDBkgsYiMQG4GvzgjEGCBfnd/6q7qnVEXZVpOnDJAUh5X69KQ7Jc0ZLJ6//CO01AyQlDhUYsciNgKxEZgxAlEBIGQDjKNifqAHQAaABbvfB4EOhplmvnOa0VjmNJr8WUAIMjWBNvg9KTd7RmKe69FUHQvDJ8OJSu8AKJJkST35B9vgtQHKAZ7Ryfwg+LGuoEY6bl6X5pFemYI8Vx60aJvgPdIKdHesc1LyvFiXe1rHl99xyW9OFkdRjuzMfUI2rlpreXTgRXuoKE59OujxcSj1Yzl97bjsGaiSTTVb2S1xpblhcN0vgz02EAJkmF5nnqQlusTROSL+3FTJnrDAg8ZB+GDYFK/IdGB0Z42pF8a5sXrdhwl4bHxw6Ljc6h+RTOwXQSEDfBiAiAJUhqximB9sizq5JugDcuPcRdm270nJgIE4YBCdRTaIIzENv5ElY9XV2SQ9MPZkeDI9CmiotwVRKoIgAaUqA1D0NHdJf2+PemUwCIJk+dMkbRiMIAAlRZCBahH4gqgPiEuGwf6w5LTAGhlGFVuAAEPmiOp8IQzYQvCAslbZOTU6nd9zAWSM5FrVnWR4FPHHgxxX8Z5a9fQwRuelaTVCn48H0/KltfuSNP/4DfVS7y+zgBwCHwxjdE5z87RRMlucCpQFh2/ViCQDIKEROoESfhIwI/uDf8MiRINm8mTG2ON640UFVka6AOCAsUJADvUFgc/gLd2d7xmpDvnGfVUAQGa+VNMzhvNWStDIebaYS7VwqCrke3laqHFbyP5G2160y0Xbl8VuL9R2l2Mboba7mNNuA4EW2B1qn+61aW1tbXLxwjmAm5b/R3VVNRLgK+f6Fc14M4F/9epZ+HsdgnG4Q03HK8KwJjifYZ6nTPuj8Jg6fepDOfz+r/X56lvf/mMFVF579Sfy4Ye/BniyRvbu/TpM4pMxVhPSCHPvzz59A8CBX3bufPQOZoe5rtJvhB4ZfG4bDZiEz7ZPBA4qKjdOV69TjmvT5gekZtXmWVkgBD/eeOOncvPGVXlg71Oy/6FnFdCimTvNmskmGYa5eajrPVkv/X2QqsRyLngncL1oYjYGCH1YSkpWS/WqHWB9HFAZrBIwcC5dOqUm8TRJJ1AT6vyNZtuxZWIjMNsIFBYWSlmBxQBrvNmioBsTI0tZHRqqPzEGSKhRiU2LjcC9OwIxBsi9+9vG9gxFvYnxsrnaI/meTTOGgx5b+Z67X6S6lPf41Emov8RbjNvYsRAbgdgI3B6BiABIYmIiErpOaULFfxwq7VLBADEm5pYEFsyoA9JXoQbWACPh5rkCL67e1BTpAXuBYWeT8HtyIrw6siwEk6dxZz8YAYj9rYniK8iWsd4WsBR6NEmv3iTaptUWQRADGBAsMEboXLa3/6J0jxRKOhIEApkiQiyKV8DsPCPPI1ulVir8BZIbbyUgqGzNv8ex7ORAqjiKUWE+CPPNjDQpTvbIRIUFMHjy84XeH1zWkZIuzgB4482/jYacdTdLv7tSKH+V2jAgDQA9vN5RhQcK05OnmTHsDvtvgBB+t3t/8LsJ7lsvTCcZLiS0e3IgOdVngRZJ2TA0B9jUmzAg14dQVZmAxAGACW5RwQ1INak8FYbPSFUp4wLLNHm6FPzyAAAxDBBuA3as0gfwxJh0TBXg1wkYphs5LYIZ6R6wR1DNpkALwBMNymMBbOnuqpO0XvjGQCsvNS1Jt319AswKgAxFcWPSUihSDeBjEBJj9PRgUPKK4MdzowVyn3dCTpx+S5pvtOo8A37Qx6TTe3u8K0bzpZegGnbQhT4R1OsFCBLXg98SYAbl3Qq6ypX9QfAjP2kV+nhN+ksIhljMEQOMcDsEUAh4DOAlVadb2l+WYXsxzOJdRepnQjBkpUQcmBPJqO5LCCoXJwDCeSslwjFAVkr/Yv2IjUBsBO7eCDSAJXns4y/UXG7NphokrKuXPUm40L2nFwUZBgde+bE2ZQzJ7X4bTH7Sf+LGzatqjOzOLFCjdAbZE9evX5Q33/y5AhWPPvZtgChfV2YElyeD4513XoQ0WIkCHZOT48oeITjC5yzf6DCKLyog55QzPXYEGgYG++Vm/QUZGOiVyqoaSFtZCdnZ9pcyW1VVG2Xr1ofVKJxslXfeLpekZ1JUVsuwM8jO4zbY9+6edjl08MVp43ayXBipzlQpKqqG9E+WsjCawEChN4q9DT/GgswUMlS4XEXFal0vmuC4hwsCGzkAX9as3qw+KJTByskpkIb6cyp/RZN0MnViERuBpRiBdDznl1dU4Dz36Pl35epV2bV7112RwUodjx3nS/Ebx9qMjcBKHIEYA2Ql/iqxPi3WCBBgYJFnmjMJ7w23i8WY+4i3y0Ys1gbn2M5SM0Dm2J2wi3vjIZEf1ySuqZKYpFbYUYrN+LKMQEQAhCaGTHozM0//D4IfBrQgI2B8OFDRj+ljnd2SCvkAVioaJkhw1aJ9YGiO7s6CIXd8CpLjOLGQoA4GPyYn3UizQwYryEzcjQp7V7dTWpGwbu3u0wS7MWbnpwE9+uAfEAwYECggkNJdd16aYJReng/nCIAUQ1MWA6Q3dwKSV5mShKS7E2AHGScEOiZvjUo8EAGCH/xM98ejxt8KskYywB6hOboatqMScxr8oEloNpLP/ZnqZUI5r5ESp2TAK6Pncr1cO31G2SqsMmTlpT0M+GEHQfi3nflBsEAjtQsyYn5UlELbG0X0zWCkKM+jB206fTKSAbkvGKWnZY7JcAJYCs3WbzeYAskhypx1Y9zAoGGKwI1Ei4Id+OEJFoAiokAYA7svA5DjIiXFTOP0uLZ+ZX+EimEALIzkXDBN4Dsy2tkhaRXQKB8mm8LyB5nWBMNyBEJSkJwR+FXU5XWB9WFJgRH8oL/Lt+JSpfbqTTnhuy7nAH4Y4IPbIPPDT/wL/VTgA/vWi62orwlAHrbN4PE8FWCk8O/OQctolsd4eg33HNs3rA+CQfyOITDr0xidQcCEod9zuuBXYmO4KBOEkh0WG0e/3KUYwDF24nK7nK+fKehWnp8u+7eVihvA20qISAyQldDHr0ofKLPDSvtog5I0sQrpaEcrttxcR2BkZETOnTsH1kCdpCSnyu7ttUi+V821mbu+fDL6TnZBQWGlnD/3mbz33ssopEDRxbYHwWiwqrVa25rkwyMHNBnPqKnZDEmmUgURyJ54//1fgZ1wHHJTtfLII98GO6FUl3vkkRekseGqAhGVVatVxik3p1Dy88vV5PvWrd/CU+RtJPsLFTRh0p/R1d0JhsibOs8PuaLc3Cpdh8yISEFfjL17nwEw0aBm7Ic
O/grPXQNyHwzZN2zcPb1PI94RAAqX5ejR3+gyTPTue+g5ue/+p1SCituizNTadbvU9+SLLw7rd/qhxMcnKpDT1FwvX5w4DECnRTZv2Yvff5OasIcyQg/u92wMEC7LdtgefxcCMJ988q4CRjSVp/9KtFJbwduNfY+NQKQRSEhMgrzdKgXbKO9HltsgCpruhg/IijNBjzR4sfmxEViCESBYz+B9h/cfRjhfrSXY/LI1GWOALNtQL/uGQh3DfM76Kr2njcPj9EZLP1RdOsQ/dltiOwOASO26Ani4WjmcZf9xAhtcSgbIYu1Tf0KjtE6elt6ky1CAWSuFCVvFPVEmE/GjkjC5MnJHi7WvkdrhPjO+avsdaVy+bPMjAiBkgKSnpGhifAB7R/DDDnwQwGAQdKCcVbayL7KktR1GnDapKyODZT6ZZKa6M1kEEz19ykLw+5L1BXNsLGCOgfmsWB+kBwgMrxkEGCbK0yXrLGSk8L0b4MeQz6fr56PCkN4hBgjh8pkJ1sWOIAhBA8MCIQjixYttdhuYAwBACEpM+IbQFmSqcq2EQD48HxhTHTCYjW+WsYCdhyfXJ33W8a/zFRxBgp8CV84Ut4ySUYJgX5OdbnF40mQUxoIEcoKj68RJab10Uycb8INgCFkgDMP8sDNAOF1BnID8Ff0/zLTCCiRI4vizjltG6Phk1WP8kAveGqnSNeFC4giMCyTmCQbYpaAIAvA3NuGGv4cBLpQdAiYEzc+nCHpBoUr9MZAbTUnNkaTWepFij0pb+erIfLjNFqHR+ACWJQBBg3OyQgiuTHVwSwDVyDDyYh8CaBKZJ9nFleIrBEsEMmQ0tTdG5/T6+N0kvyRe6pADDSel3zc6A/w4gepgBhkZBCY82R4cu5XSOHxe+5sV2EZPZ9cM5hKra43IGEG46923LOAHU61xscaYfzPIGAkV9AnhmI7dmpKJLYmSQkDIOpxCLb6s03w4Bo9fxrgdrZMxPNjTCMyRFC971hfKno1FCgquhLjXGCB8AOULVKTgC9ZcH0znAlCw/bm8vLHflM05eeIQkqMWw2q2fWDl9u7dT0heFFXjph0zNrNVZnNZJi7nA65wfGgGHe34z2V8ZhuL2LylGYG2lhb5CF5ZTNDT82LtmnVInH/5qpX58rlq1UZ5/PFvQcqpDXJLJ+Xv/vbPFKBwuS0Jx472JvX26OrqkI2bdkMm6uswSS9EUrRXgYqPj/1Wk6WPPvYtqa5eP31ub1hfK/v3P60JfMpj8bykvBT9PR555Hnp7LyhANI//sN/wFi+KXn5Jfpj2be3Zu16laXiOtG8KPO5jb4fzz3/A22L4AuN1M+d+XDGPtGgndsnU4RB8OO5Z3+gAIc598pKV+m2ybw4c+qIjPq8Ulv7kAJEg4M9cvbc5zo9P79I9u37uo5jNEV8vBYwKBOmxSaQDQsVxSXVCjbxN+HY5+Tkgd2yW9wuFOMEWCyh1otNi43AQkaAlahr162R6qoSSOBektPnzsJzrhP+OsULaXbO68Y8QOY8ZLEVMAJ8lhsZGcC11YtntSQUIbqWFDAm4O3DdXyxt8f94D2CBQFNTVdloL9HpSApDelyQ0YHRQE52XkK2N8rz4sxBoh1CvMZYWgIRao4hhksRmFRxFLFUp0z9mO4s+MWnrma7/ljeLbfiKyP60198qsPrqrxuYmKAnj85qTddQBkpTNACH5cm3xXRuLbZHx0XFrlJFBhkfT4/K8kCNAkn+shVBK/6yu5/7Oda1+meREBEO4MAQp/NwAGJNzz8MLN6AuwAZjITvQzOQ6JpNwcTWYTjCDAUZhvvcg7vQ4k7FPF50yWFK+VrHAAKPE4kZDzZCp7Y2iEMlPjYE3AtwPF8klJMJAuTsEnHqTysiUZUkWjAEJMZdLEiS6pT2EGHXrVMEpn5BWVSUNARksnBAUNxEvQFkOBEOxTU/M1cYEFUhhIOsRnFchIHiSorlttc1mnsxXSWJnK/HC5emSwD7JZmNbT6QHbgtbXzN1nSmYyDLQJoPTUizujQKW1pjLQbySeyWAxIA7ZIozBQZiQw5ydoZrbIRggOjNMGDkvA+rUrKqRkqoNujQv+OljTvisD8pAfIc4RzOloHi39DbdlC4/x7pPmQxM6JtkPj8JPkw1W34f/B0ZBLy4bH+A7UPGiH09ZUQAr6FpOT1A6CvCoOQVgzJaCQA/zHROIyhimBTDWTBUxzMG+D9SmrpeMteuknb4gtCDxQQlr76FY2iHr1VOXb4ljS2QPAuAH8FeHwQ+1LcG/SVg44Hxd5Zv5kMMj1U/fGK4Vcq08bhzpDgUFOGxy/3rQR85JvxX4gRTBSCXmTbdscAfHDf1UUEQAKF+ueOzUfGHAUqC11+u786URFBBASwJ/+FngxmYE+fYSop7iQHCB1FWa18e6JIR3+wsoGyAjDsKa+b0wN0LHfyTbQ0R206Fyf1mmCKzEjyahCaPB74EsgKdMj2s1I4UTEjyJfHhh5+P6sXXjM3BGxbzarb2ncmJ8njNuhmmzrMtb+bxZeZsV4N0B1hf4dYx4zMX8CZcW7HpSzMCfEm4cbMegNxZZSVt37FZtm/bsjQbW4ZW09Mz5WEwNxxggxw6+LKyOdrbLbaH2Twlnvbue1qefPI7snXLA1qF2trWqCAA4/4HnlZ2AhklJtguPUUohUUA4ty5T5VFUQ2Zqvvue1LPTcpj0Sz97JmjM/aU29tZ+7CCKg888I05XYuYKGAfM9KzlHlCJgmBDrNPSXgeopQXPythKH7ffU8o84Pgh51ZwXa4bQbHhf0kGML7tGEXk6FB8Gjfg88J9zeAbYgTz5WcxyIa/m0Pc91LS/eoXwmfMR24XgUH2Sw0gr948Zj23zI/r9WEl9lO8Dqx77ERWOgIsAq0rKxM1q7fIIePHJMrl67JmdOnce5C4m0ZQd6YB8hCf8mv1vomaXzt2nll9lGasRQg9jPf/KeQDVz8+7PZ3oWLx8GCfEWBexYAPP/Cv5DiovKon2+DfyW2S1+tS5dOyufH35VLF0+pzGJw5BdUqR/UrtonFPTnvTfaZ+rgtlbK9xgDRACmDeuxe/TY6/rbszDkqa99TzZvvn/RgS77OfM+ilRuodDMnDORvNMiHTPcDxauffLxWyhEOYo8U/sd/nEs+NywYafU7npMWDDDZ6gv+zEcaVw43+uf0Hwcg8XVKyWWkgHidyCPF7n+MuxQUPaKzI+BqSa48d5OGQ8L2DSoikY5c9h1zYz5sETCrRNuesROLNICBINaEj8Rh98jeXEbp/f/bvdrkXbvK9VMRACEFQ4lGzfAG8F6UWRKm4yQZLAu+MlIDxj3GfCD08gUIcDBABwiDjAzmHb3ZLulxzuuSfVEJMoN+8Pns85QJqD5YsrI8GQri4HyQWRRTI2lyMjgiPQ1dUI+yoqUVI80A0AxwW36uy12RDBrgssY5oT5HIcW9TiqEr0ALxhDkLvqA/bhaRyC+bbFABki3QFBsINASFL8Tf0kGMJPAh8mugCyJLot1glBkGnpLiQTGWSF9GZ1yy0k1+n/wWR7VQ4YM0Clg+Wvph
vFHzofFYtktATvF79v37pNnIUF0tU7CqR/XIaRbO0dh9QWmCKOcYA9ad3S2Qi/islKAD6TktY2JMOZ8L4IGLYz4b+qZrX+phNZcQokEZRKT/Urc0IZQPRXYQDQUCZQwp0VlN0TA5IwYt1gjCcLlyVTiEBVXk2FdNTVQ04KAbCMoAI9QLiNwrU1Urx5u3zS+rnQ/FwKsVTrgBTcSpI/3pArGWDKvNZwxsb6uJ30MUbnlLwS3Nd8g5AdCxwkPZDlYBj5K2P87kjp0qSKkXXj/hl/G4IeBFE8xeXKWuE+E1AxYJEBgPhJMIV7zOUyfQUKIpGVk5WQo+sYoEc7cRfDnZYsj+4slY0VFkBluuJB4p3zVkrcawwQgh/duG5lZ6TiHA99yWWCvqlzQNZlsjIZ17ooPa1HcMyeq++U6oKmKlxlAAAgAElEQVTwXjNeXD97hry67WwPKjZwTY8UrHQeG/NLc0u9Ss4wjDRP8LqsxvfhxZGfra31WpUXSSrGVGNzbBiedPhLAaTJRuVVN7ymGOZvAkctPYO4loHBBVm8aB/UuQ1WctU1AQRF+wSYQoUX956mrgEdnxgAEmqEVsa0XgDyH354SKWPqJO/ffs2KSiK7FGxMnp/Zy94HPN4fhz+HQQomptuSGPjFfXnYLjpb6Hm4qtVporAAI9pGnF/73v/G8AEv8pb8Zy2nxP8myyK73//34Bd8gMADihGwTpkl9FjhMBDReV6sEsuYixvwmfEen7i9vLzK3UemR/zqXzkOuvX71R/kYcf/pbuU6htFJdUaZ/CvXi7XZ7pcam/ecm6rmBcyF4tLKxAH9fp+nbPFDLE+DL/r//0P+n4cT6n2YMAEsEVjrdZJljii8sQKOI2OMapaagSxHhEc92csbHYl6hHYBT3jljQvjBddu2qlTd++47KYH3w0RF56qmvLSsAwt8h5gESOxqjGQGyMNo7WiF7+IocPvxbZc3xOY1JVzIWFzu4vZuQUGRy98MPf61MRvpWMUZGrM/5bJP3VRYTHTv2Bs69f5CbYF4SrGdBQDZkvZmTIKjeDc9HFg2wWOHUyWPyjae/r/cTT9YKofnPZ+exzleZAcJjqrevWz777N3pQpTu7g6pWbVOdu7YP88RDb9aqHOGx/CWLTv1nCFjfb7PGmQHUzb0tdd+qsdo8DHMXpFxzOOb5+rp0x+gkOS78sij35lTcVz4vVuZc+j1sabcI99/ct0MCaxsFCiX5JpsYvR9ZzGWD++WgwN4b0RxQnp6Bp6v8dI8z4jydX9erbNYrG+qAQofZfNan54flL2CHr76LpogsOKH7D8yZRHbtUtFGfmoobh2aOL0h/UTCScvZaYTmHFA5z7cchE7NY8F2HeOJfMdjjiP+OOs/Y+BH/MYzBWwSuhsnK1j1KV94nd+ILUD7TrV1z2lLIy4ASTq4JvRMd4vPW0tkrvaJu/Uj8Q0naMR9NAgwJDpBuCBSolsd4aMjFpMifgpnxz95DNoKc0cCT5oZGTkS4GrXGeQBcIgcnsruU9y2i1mQv7qTdLX2wlJrCzJAxNFDblTXOLIHkcSEZXuSP6HAgvYVkGJVa2fV7kGiS+YeY9OStIoZKF2rAE40Rmojwe4g1OUAAjneX24TPl6LTAVn5aaklcFqKYmveIdtS6AzskWBVToASKQ1jIm6Pz02uS9CobjJQGAUBvMzw3AoTuqMTntY8J9sCSxbmsXcnkGp6/PWyNPffd/lRNfvC6NMDDtbtuEBLxbgSPGMDw30uOtddPT3FJckyen4uphEgKQCcAGwYlMGLoT3KKc2CicMRgEP8j2oFH4gO1HMjJoBC0MEEDWhZXK1FU1ysEW6kPbCb4BAFbW2BD86EP7djN1AmfZNetV8uqVlrd0XWN03ignFAy59sER6aPhuE3yiswPA3xwHfZhajJL4pJ6AX5YxyPbVpAFn8m4VJPB0dHcPMNvxgA0BOUojWVnMtGrhGAN2+A4DUKqSw3UgSFw7BIDrCcCJPRG6Utp0/4zVCIM2JkrTOJ1esFl+iPFkSDryjyypnTmDYvSCwu5eS929+8lBoiRXiL4sbY8UxyJocEHZ3ICEvWRWRbBY82HaSd+19U51vmVHgcKN7yMpj9RbT3kj5MzkRWsgpvWl6LRUes8ot/Cjm/vlLLcmeAZV6o71iyff/K5rm+St5GkYpikJcOEQXCC/U/HfvCKq6+SnOex/h4CoNszFPFWpW3Zg9vwI4HJ9kvgc1MSRueVnk2R2DnBbce+L+8IUMP3woWLcujwh5rwuH/v/bJnz/1I0ocGtZa3d/PfGo9RAp5rVm9RJoTPt1/PO1ZkJiVBehPPT4m4ZhhAlMszKcN/JkKBpXyBLi2p0H/By6nPBZ57CJJQsoRhtkmTb0dAwjNUu9ONzfIH+5jtydV/1j5Zz4JmFTIpuE+McNtg/xMT07SC2LRh7yMB1uBrTDRjw3ZN30x/gvvAZaIZ41mGIDZrjiOQ7FhZLNQ5dn/RFuez2I7t26dlsM6cPC8NjY0AOvOX9Rkt5gGyaD/pkjTEBFxra4sMDw0iQZ+LgoAsFKRF9mpazM7w2fPq1bPy2us/VaYhgyxgAnezBa/bJoKvvfb1zHJmGW7vxMkj8uqBnyorkAWB3B4LbxYaZAoT/Hj1wF/LlctksJQru5IV8pS8sqS5Afa0N8jxzw+q/CS9u5irIOv5wQefCVkwYN9Xex/D7XfwPkezfrTjye0Ht2/6dLcYIN3d3QCVOsXtRiFpVhaKTJe3EI8sdBZ4/fY3P9XflEF2KIGD2SLaMQ8eb24v1DljjuHkgLLKbNsON4/MD4IfL730l3ps8hjeCZaSSqfajuG+3g5lOJGhy2N9ZPgn+pz5xBPfnVFQYrYTzTFo71PwPkezfrTjye0Etx9uPOzTE3BtLMvPkHzP7aJZzk9AUZAjcX7XzfPnz8vf/+zvpBrKKzt31kp5eZlk4RieDxhiuyRGszvzWmY+SXqu0zt5UxP+wUEGhCMuMnjENgh2EDjwTvVOAyZdk1eEIAr9RLISUJgNY3U7oGHWI0jim7KyrSlxVqEn+8Q0ZXncvqgYKMF9X8h37kMiJLlHpE2ZMeFkwAiO+JOQq579UrKQrsTWXeAIRJVVun4LlYl1VyQxI00Ge7oBRFgnQw68M0wQ6GB4wAbJKLFebn2DzRKf5Jai1WXQznRpFRNNTK0XexxA+JsACJkGY2O9WmVhwo3KSE8+qvltQcpaQt2QFFyy/DZ40UldXSh/0D8OVHdc2xvot17oWSna1vTejPXNF4IfmaDKOpLhbZJu9ZsAx1gyHUluhxdJuKRJn8QB5eweHBcfDDxN+EYsEIbfCRqQOcH2GCNDuEjHo6o+06bdy/FBe2SAjOQBagCpgB4jnupVuo4F1FggBcGN0oJsVCDUSEdL4x0gjq6AYGVK7f075Ymn/ydZi2WbmkjZvaUyIVWVFUj2O7QqhlWM6RnpkgZNVkZmRpFUuaukpXhMpWFU6grJ/WHIRTHIlKCsGUEDN74n18AMPWD0zfkGUBjvtTxD7HJY2oAtLCN1UA/BIOIYcay4n
aHhFAAVMDqvwfFSu0au1V+V1uHuaa+PJwsekxfgT3FgslHeiu+Us/Ej4gb4cc6Jqx6AD4YBPxT4oDcHcrMTfkiSwZeEYAX7aTdfL8nMlxzPiHT1tEgi+uBKJeRjLct+0qCdjA3jX8IxUG8ShBrB2z4piZWczXGxtkGwwwUvFLI99G+wQBh2jxWdcBeDScSO3hHpCJID8qAKogg6mMv9AhVuKO41BojZT4IfIWm3uGRR4mmuwao1O9OCknsMwK4zPvlloTJnaenpCn7UPmpd4wKbCHwUy/kzaXN6EbX6HqotXD8BsKfiP/0cn/nAOnO7s38zLBPdf4xvyLHHvFEc/7FY2SPQ39crB997E8fZBWUiPbr/QfgybFnWpOBSjZBJhvBcNuADt2WmBydLgr+H69dsy3EeE/1kUAS/fM62XrhtBU+f6z4Fr2/f/2jHxb5OqPbMtGj2L5plZtvGSp83OjoKfxPr2SYHz5HLnXha6eNzt/rHQpSCwiIkXnfIxygqoATb0aNHZePGjcvGAol5gNytX39u23311dfk7UPvyLYN62XHjj1IxFWDHVeIJJxnye+LvGeQ6Xvu3Edy/doJ9Xmib1NLy3V55df/PeyOEMQYHBpQlkgqlAFC+YQYX4T+gX7cDx3TPgxcj+AD5RC3bHsIScf9ymI88Er47YXtiG0Gt0f5LjI/mBCm/9Xv/d6fInn8KFiYnmmwnquwaGD16m1gB66fBkuOffy21HBa+eppJqaROOoCk2BkeADPxoGCPLyDZ2bmgvkJdQvc7+3BBLZhs/AZh753ZNf0Q07Zvr6dORk8nuxvKPaA6Y99TO+2dBdBvHfffUd+8auXZFV5lTxw/149hougZJGNd/HleB9lgdr5858oE4KyUJTVZGHWi7/8y7CHDo+X/oEePYZ5fIb6Lbky2RjB4822v/ji0LQ3WjTnTNiO2GawT5S9orQpmR0EP7717T9WiVD2z/5cOY5jmL5pq2u2T4Ml9IJbu7ZW2bssJDHBY5JeOPZjkLKhbhzDhpVs759h03CaOYbJrqEk+DDOXwbzUDmQYzbHqn08w10T7P3hOcLCNm4/2mN4CsfasBf5rgEfWAy3C4lTIAGe7U6RNKgPzDW6UCz7+utvQx76lwrEUpJ3165dsnnTZs2/5eTloZDGyrlFantJGSBgbXjjUFw5D5SFwMWwZdZ7xy6kSZ4CFpGCMlnT/iH0QrXvLMAB+on0Jl5WSSm2Od1PLMdtq+9IwEOVwANjfAoqQvjbOZUl5bIvUheEbBEyWRgEUxSIicuSzLhySZ+an48J2TDDcR0K7hh2jZ3dQnCE7BsG+2pnz0TscGyBZRmBiFm3ifExGWisk4snPtIO0buBQb+D1oFuXDhAQWpPkrTSRBlAwrnj5nUAIMXTzI+x1BFpBQNkrLdl+gG+DBfnpCQAIN7haZ8Is7fGON2VkSlZ2ZkSPwEQAjJYjCFIKPXUXZPUsQHxVadIPSSdcgYHJQXIqwPt9SBB7kAFQUZGhlRVrZHzqVZVip0FUrO2Ssj6MMDH1bqbeGj0SXY2HkgAgIzkpar/R1HrmEBESiDEpMDH5MSdCKj2ySYF5ke1NL9T6omvltWrNyv7gwbrZIIQ/IhPteShuO4ayNYkTt4GkTgtERJSJf1T6mfizkwBAMKpYBsgAbkW1PjComJpbQF1A7H7/iekElrVeSVWVTYZNjSF74RxJ8chC+Mw6OLVxvqZeyD3leEaFn9HotzohzSFu1/yWXGAf9keVF/4wIAAMGAkm1bXVOJh1SfcLwbnERxgGGCBbAmCBoN0RLIFoSQyPfAja9sMBYgwPgwCFflbd0jSlnz56OYnOs2wPh7L2CDPj0xK0+fHZfLKFZFtHmnAS3o5FwrB+rAzMrShQLBfDAJs48OXYdI+KB5Umph/nMfjgGPG380EwRBoYYkH/yQgn2X22yzD/SdwRKCFwT6YX1YlsOADstKif8gvb3xyU949Xj+jazsBIv7wmY36ILAS4l5igMw2nlkJPQDLcPzgGSmuae70fT6odqDKIDjswIFWcvpDs06C14v0PSsLDyfggoUK6vrPpRKPfQ9nTG5Aj4WAH+yj/UE+uM869qRxcezBAInFyh0Bw/54/TcHp9kfjz3+da20utfibiTel3qbi9H+YrRxrx0rC9mftrY2JD9eRBV5m2zZvEXWI4lKs+0YGLKQUV2cdSmDtWfPPnnxxdcgVVInHxw9Ik9/4+uQqF2z5Ilt7kHMA2RxfselbGUSF0QWu3148Ij+c7lelI0bahQ4IxhSVl4OP5nSJQVDWMjo8RSB/fA76uXExPy77/5y1t0maPLFcSSA4UuVk1Og6wV7HnTjPfWjD1+VK2CXVFWuhgfWC1JSnKasSAIPRZBX3L7jUUnD+1lPd9us24tmJpPZrIinLFBOTp5KAlECkSxA3nfs9x6CC5S7ouzV8FAPcg3voz8z3+GZBCar4LNP30Bi/TMFbLxeSNCCscIke00NvMu2PyRbtu6dIena2taE8fs53rdHlXniRYL9k0/fUYCJeREG/Uf27HlEx4R+JwSFPnj/V9NjRSmjUDKu9DYhO+AkGDTpyAs88cQfKJgTCiyJZswWa5kh9P+TDz+Vg0MH5eVfvSpllUWyZ/d+SE9tQEIev/UygCG5udYxvHnzAwoMcJxmC4IYV6+ekmNH39Sx3PvAN9HX7TPGkuDHJ5+8I1+cOIzfqUIefOh5HW8GJTy/9tT39dgnEMDfb6HBPp08cUiZUXwX2//I7yj4YY6F4GNYvd4AIA4N9ysgVwypb3sQlKB35WefvinHjx+BifoNlX/jMUzZ8JLSDQCsviZbtz04QzqLgN2hgy+iuKJNj3H6sHGc6urOqvQWzwN6tG3b/oBKpNKXTkEhXBPMWD362HdCevnwvDp95pi2R+DpSfizcP1ojmH/+KScvd4jv/m4Tnpt73tFngz53cdWy4YKyxt4rr8DfXsJyPAf79Vvvfn2NBiyb9+DYHavVjCEUr2z+XjNA5uYa1ej9uuwN0z2BQGI6eRWYKYBHyJ1goBAh5y3QAwABuGAADJMxhP8MjBhgRQzD8bb3+wgAv8msENwwwnll3Bh+kDfDgUkMNgEVBIhdds1WSA5cWvmbGZu+sGxIbiTE79mevMEV6aBm4BkWAz8CPfr3N3pEQGQ8al46cSN2VTv09Baq/ohDTSAe/KWdTtFcP8f74iXFEhapThTlSEy2GQl6fPc1ZCm6pGW9g5hBX5RQRaqGtpVWoWMDYalGW9pbPLiyiguhr6zY1J6NC8YSMC3t0vJLSsB1wsE2NHfKpNjqTIe3w6JrXRpGMZB3WfBi2SeEERoOn1K2yOAkFpWLJlgqGTGT0gGHmIon7WuOE9uwKCcSf5MSFgNd6CSGd4cU33D0jdxOzFu/E7YFhP5/Md1zLgQ9CDLwRiyc57fB/krsAIIfvBvpOKlOYua1KkYMtJ33dLd0g6JpCxZDSbICKSq+vvcuBD0K3gw2NauniUEbdbtvF9Wrd2tgMqm7fC3yIZ+uCtf3HmQr8L+9E1aSc5GyJEd/egj
qa3dLXlAoL3eJGlov6VSXtn5NeiHyOWmSwpUSYFb8vfdr+PDSO/jzXCbjOOT4S5dLfFg/DASk8Z1PoO3iqG2TkmfGJOhBAs5j4P/RX9rvbRsd0lpD1BhjIeRuaLJvQFByJggw6Rqx07pzhqTTwF+EPhg0Oj8DxNKZf2tj+UIjM7P3Wi1GB86V8RIXuWmWjXu9A6h3wfbc2E8CdzQ34O/CbdpWCBVxZ5poM2AVMagvQfHsgIi+D17+vvUm4bgB/eNbbA9zr/WcEOZHcr84PFP5gf6RBCEMmEa3dZDahbYenYQJC03/MXZWnF5/j8ObdGe/lG51TEkY3jAYSThgT4zvUdaUT3iTl+/LFU3kfb2XmGAsOqKevLhwiTgOX8qlVyrznCLhp2eN8Uk8Ex9KztwwL+HViAH087OsO9cMHizEBAk3DaI4fSiUs4yIsLYxxggYY+vlTCjG88aBw68onrf9xr7YyWMb6wPX70RGIV+9cmTp+S1V3+Disx0WbNulezauWsGGMJCovnIOXz1RnNx95iVz1vAbrtv326VEzr9+Vl57+D7SDqVzZpEWcxeLOS+u5j9iLUVegQolcYw/mtMwLXhPf3wkWNIqt4GQzZv3qlV9dXVVVFXI4fe4sypTKhSNodAAYOyhkbSdLb1KbFI5gMToh99+JouavdyYsX56VMfIhn+VzqPAIjbxWdjqAlj3f0PPasKEqz8pmzVQoOJ3q6uFjW9HobU9Lp1tQqu0JtqNtCd1etPPPmHCkSwf9x/FtywPXqUvPzy38jnAC+YMCY7xuW23gEbG64qSHTx4jEkn3+k+2N8rFSaCLJETDSTOcJrNMejetUO3U2uS1+HegA1o6N+eeGF/1lBIYII3BaBkjVrau/w+eNzMKv4Dx95TT795C3Zc99T+htEkzhe6PhGuz4ToASNeAyfPH5a70m1OwDMb6pVMKQa+ZHq6upFB/SMbxi9w/gbhivKsu8HmTmUqqJxOX8LAlb0YjNgAxP1Fy4el1fATGprvSlPfeMPIPFlmDm3zxkeYwSmFhrm9710+SzYCL2yes122QXpK3rDhQse29w+gbyNG+8DmyJPzy8ew2yP4MeBV/4rwAwLnKmoxG8QOA472pvkzKkjcvXy5/Lscz/U88B44FBxhMbrHJfOTuu9lMdwGdhRgn9cl8cwwUaO2+9950/0mX5wECBk4HwpK1sj+XmFMxhSvNyRSUIGGM8fnqeMYP82s7+U7g+OHiQsT9V1yODIbcm8PqitDHsrghed13cmuckKIhBiB0N4H9+ycdM0M6S4tPQOxq2VtZzXZqNaiYblcw0CC5SaItsiOHjeZI6XS8LETBZb8HJkf3RNXZlmcATPt3+fD0jQ67gsheNbI/qQEJQIlvHi9gakSWW4uC/ReKTQb8QZj2t54AczbRAI4ZgQYDFslWDQaLZ9j827OyMQPjsX6E9i3KQ+dJdXVOjDVh2ADEZ2wAjd3w4jmvws6UzplGxvvEy6oM3cmypdo42Sk5yBaiKfMkXSC6DsjuQ7q1a6caGFu64mmBm88BIEYVAGi0gxK0umhgmG+NXLgv4fE0Ot4k5yST8YIK68ZPFOdogXuMKZs11SGFeKh6VV4sgcgCcJPA2SnGoKnnEtXdypOGg9eIiatC58E0ClGWQ3eFuRdARwoQwAJLKBUes8JsBNMNlNYEMZDQgmwA3QMS3thOn2ac6ePmW+uD0Vt9vBwxCh1HMdDQqAuL2J0thHYART0achUPSchZni7U+RTJXUKlS2SikumDl5lQqmJCdaVfqpKZkymWD9bcAP+pjcumFRRoZgnsiL7jgYPOPjidLiRf/7z4KQ4ZuW8mptuSlwep+OKWWS4PcNaOhcmDoHasPt+Y5yl4JD9C5pK3FZn2mT+jkCo9CxMxZA0vBQsU5jtGE+oyfbpcbybVvTJLtiFcYZeEH9NVQpWQ93GRl5simvXFo7RuTy9Snpc9ZI994KmRoBk0L7ma2SV66mBMkrtuSlkgF8KJCBQ4eMDAN+cHv04yBYZwzPCVRQ0uo6QAyyNcjoUOkvHIJsYwbrA0AUfxW2QT8CgiZsZ0LVBi2WDL1C6B1C43QXiHsEQYwUGI3TtQ9ggRAIGe60jnOdeBcjBRJytWvzpo24af7cjAeKS++9Kv/hcqrs2lGrFQs1q2ukoKBAXzTuhjfIvcIAmY3loIeBjR0bN08DRz5s28MACAuVj1rqwzQcO8MkXhaj/+G2YR93MkEavUY0bKn3Otb+XEeARRJvvfW2mgLz5WLXfbuE7A9WScciNgKxEVjYCPClkP8+PtoxnXgiGLIVMg7ZYGCTSU12CJNP9BlYaX5hC9v7lbs2mTjPPPW0fPLRZ5pModTR4489smwsEGWOxmLeI0B5n6UKPpOTARIcTMbwn0kkW2BInmzdtVllstat36RV9QRDeP9c6LlsJBRNPwKWk8HdmvGdiXcyHxobryDB+mNUyb8Lyant8L15SIuFKOPz/vsHpKe3Wx7a/7zs3vMNTUwzuK6RjbKp9My6vUgzmfBuboIaQtsNTbqvW79NAZmwz46BBg0TJAtS3fZlySb54IOXp5O5TBATJKFcz9iYX6W2Xnrpr5AkPqySWyXF1bJ58/0zwIguKDgwuUwPkkce+TbekVfpu9ilSyd13aMf/VaTzGQQ0B+LCWMCLZTLu3HjHJLDZCNYOQ52l6baTU1XpenWBe39urWbp0Gl2cZnqY/hUNs2xzDvSQcPfRAA9PKU3UQwZBM/F/F+xN/OAFDWWN2Z7A3uJ49BMkX27fu6Hjf0DiEz6fHHvq3AHFkQ77+Pgh0k+bdt268MEQJmDPs5s3jH8KR0dtxCXxp1GwQbikuqIwJc3HcCFwa8MPtJYPHoR69Mgx/PA2gjg6WwoESP4cbGa+r7c+TwAXnvPco/Vcpe+IzYZbZ4DJONsmvPk/Lc8z8QAkw8hnl+Exw8+O6LKjtGplPtzkfxnLFJmSFqzl53Uq8R9Gozlzl+Njddhw/gF9pNnqdkz/BaGOJSqDmbEXjpeQNF1lwnxTEk968vRk7ydm6P3pwMetGMR3MB06WtYF7SyVymLexJfILSBhB5Fbmiyqoa2bJ9o+bl1q5Zi3+rlRlCH8OlzrPMxbCcu0PWhN38fMZO4ks0/h929ocBA+YDcgRve8Z4L9DgnW0RtJjNyyPU9oOZLOa6FWrZ2LSVOwIRAZCpuERJw8WbLBANJIc16UyZIMSlnuuSO5qnf1P0pwRZYxcq7qdGylQSywdfBwIjGZCYGuxtBjAyDDAERpuouNekMyI9FVX0kLIi+EFTMbJAHDDlHgECySD44QdLoyCuX7xIXLdl1UlHa6LkZGXgwB0Tf8OEtKcOoDoArIRxPixZumsEDvyQjTJsDYIcKalgCGS4FBjgRTLBkyk5mEZPj8RMq9IkDjJLXJaV/wQyuK9qsI4oR9Ld78yEv0bfNOCRmV0iJanV+nDW2vaFJNAfBAAGwR/6fljsD11dJbaIMaQ2ecWfCXCHlSyDfVhnWPKcSWByWAl5bpv9ZyQHvEW8AAOSXTAK8oAODFmwFIB
NpKtTszcJ+zLQZiHuTM4z4kBvHAJThjJfly9DmgzG8pm5oPAimZ+CK1J23YCMea5Kdba1nU65piyK4SzIZKEJB3CGFPiUMJjMj8efhDUu0OQEQW7IWGGWdLdavxMxd95a2lpP6LRcPNxx+SkwTRo76qSx1WLvtJ05gbKfQPYXY1EYly0XWj4Q6/EMWEe8R1omMYYW+US31VY6JpUXAYjgWDHyWwQopgBieGB4n5ZitUcGDMENk1umbBdBIVKIc8E+YlD5Jg1ABtfr8Y4rEEJwhI8oHHdsWY/xBBy/oyDsKNsDf6+BTilDGSH45DjHeZwq/0XwY3gQXiv43gk2TC7aZxjGlH65y+FOBzV2e6k8uK1EX36GYKD4X+rfll/84rAcPNghr758YMZNmlqWmzZtVHO65QRD7iUGSNifHKdCVlZAAgsLzYcBwkqdYB1hu3zUYoAIYfu/wBnh2DGmz4tRgRqOATItPYZ9oASZI9GI1y1wp2KrL+oIUPrqiy9OyEsv/kKTgAV4Afud539HvT+WQx96UXcm1lhsBFb4CKgMAV4GPz76sf7jSx7POSaf1q7foOyQLVu3LloCdYUPx13tHj1ZanftvCsskJgHyPx/et6z6Fc112TaXLc4DqYBC/jChR0MaTpQLwffPKjyTnYwZBO16quqliUBZ+8nQQMm72/cvKyMBCaLaQh6KHQAACAASURBVNLMKvlPPn5LE6eVqDh/5BFKX1WETOSGSnqGG4tw000CmjJAKlGFZGZRUXWgEh7P5HdiTHc0ZQc/+LzZAPbHqZPHtBJ/34PPipGk4raczjRU2++CH8J3VBaLyd6zZ49pMt2ehOdGKHW1d+8zKq2UCO9ABoGN2tqHFDxh4p3ADSWA6D9Clkl7+zty9tznAI2+rpJhJkZGBuBtclIBEhp8kyViQCUuE+o5eT4J4ekNRvkHj+HeXnqUhnYJnnEMgx1CQGSl3I8oIcVxrod3KRkJx46+hQLcDQpWUTbq3JkPdayffPI78IzZHPIYjnKYolqsr69LC4cpf0XJLSeS6vMJHqetAFIo3abH8EPPKfhRWbFGveJ4DNMnhJ40ZCTRb4QeKgawMNvkcwQ9ankOE9w04AgBu5079itASKZTZ6dVsEvAhtJwbI9sLLKy7OCiJSt3XRk1nqxs9S+hf1Coc5S5vffefQ/g2WHN8QWHXbaemh1//3HwEtF9ZwE3x2i2MAl/giFnznyh/8h4MWDIhrVr8Xy1SRmfE3MEYGbbbvA840URPD3cdzI3miY/u4M1YZZXr44IQW8MGp1PMyIiLD/f2WSY5MVvDCuDxX2ZLfgbBXt5zLY82SRLvU+zbT82b/FGICIAEgf6kyvNIZeBkJqLybQEEH0QkATul1vaI/pB9NAzAZlsJqGnfSJYEQ9JrK5RS69+GGyPlARkr9NuJ54IfpAJwos4b/4FrnIBy1OD5rWBP7XNBgApyRVWlX3GECRgoMnU0DeoN1PG4OAIGBNuNTUvKrF0F6cmATgAfHEkw9wbEkc3bjZP+3cYQ/O4jflYt0HK+92S5YaXCPwh/ABIylEJZ7FF2mQoHswMbENNz5EI72ttxa5dk9WrIH+VugnMFcgtOSeloblNigKgiSMlXZoBilCmyUT2hFOZLdRgViktmK3391mATIJvAMyUOOlOzxFXQSFYBz6QV/plIr0QbSfp9k2YlxU+eGdnOfHAVC3Gyr2tuVHbvNV8S5FnSjsRwGICv7PfK+6aUvVyIduFII5nqEt68JuswgNXD4yCRuGhYdguXoIkeCAchz5rWny+yk4xcvvw6SzQ8bjp65UbOBo2JVXIqu2PSlxRjnQm90ndMNCUDqAZBD0AgsTtLtF1KXn1rZFUeR/VAq1yU4r6DTLfrsQUY3JOkKXgVpLlTQKJKnNsEaxhvQZBjMR4y2tk1OZRYnw7CsFQslNZ3E6PSp+1Nsdh3W5lbxiiC2WwCHjoGAbaYl9dANoog2XaJIhEj5HEBDBCAkAIwQ81S6fJOMaHf/N4NfJfutN3Mfg8z8qx8Qn+NaXHnz14k75w/rT+ewP7x0rQVTWrp+mbBgwhvXQpE5D3EgMk7M+NU0H9PxgWLhh20XAz+PI1mwfIYoAI4ba90OnseyjJhGAGyEI8TMJV8dmlxzj28WnWfWOh+xRbf/FGgNWHN65dlZ/+7G/VDJjmiN/85tfkuW9+8570/li8kYu19GUeAT7H8XlwHNWLSxGJkExhDIPRHCrsFXr8m9Xk/GeqyUOBIUzWL2eBRKh+34vTyMK1s0B++/qrKEjZhMTo3iWtGI15gMz/aGpHEdqP/9tfadHZUgeL2qJNbBmJIQOG0CR5zaYaWY0EOiWGdmzfrhJry1GNzOcyJkKfeur3tWqdyVBKXVH14cMPf61sBppRW1XjoSVWDHix0DGeYPILXh7U8mdiNT3NKoIMlViNtC3+Fk3NVpKWCfJN8OekATXDtEeZFLIFDGBBEIhG2XYAxOFIAsgMTyYkhQl+mHUpvZSR4ZmuOjfsb7JL1q3bpsARWR5kAxQWlE4n3SmndenSKQUaKGOUm3d7Hvtmf05OTnGoNPlPf/oTlUhc6mBuglXy0UQ096M1qKpfjmOYvwnH+BF4bVAKi2N/9NjrUtm8QWgoTkCNPh8EBoKL1KLZ17ksQxYTjwVuk+HJttQb5tKGWXZ8fEKBNQJ0BFPIFqI8Fs83cxzyeOGxSaYJAYvr1y8qYGFnkvA4p09ISclqlQYy65Ll5QLQyXOc/eW5x/7zPFm9Zitk8SCRHwD3eI0wY0f5q6tXTivgsHbdLj2HZhtXgh9/8Rd/oUUcocL+nMP54ZYLte70WAX8HWZbJtQ8OxiSjbFlnuXR/Q/K0888H2rxZZ8Wirkx106wjb6phpD+IXNta7bl+Tv6k3tkYLwprAxWNEbtlLBif9OR26TMVbhQk/MogPFw68emr6wRCH11COrjqM+CH8hQIC2eptHqiYFSeVbLs/Ld4RuUW6yAL3er7E+hu1Bf5IiSMsGehYRwGiSakuGh0NXZpJbZ5Zlp2hZIAJLjgYn6SK+yP/LySyRbNfEt43N/R5u0dV6RfH+/yl9d7Y6TgVuXVIKIng30ZmAF/vXGi/LCQ19X4MPEGNgbLoAL4+PkJ0D6aGoMaHS89mdN+XpIVGXIrZ426LrlSSc8MwQACI3dqayfCXogg+yMjPQ0yaip0P7SMJ3tDKDSJx43gmHcMAYaz8t4d4NKWXF5ymGNerssA3S0kRvvEMfUbTNilz5owbTdC6ksJMwnmHBHTIBxkpHiVmYKzdfJZKC5+SiwopTxQVQrW7qOY5CySh/DTSTbYoBwXT64MHFP83QGwQ8G5QwIcLA9H34LA2T54sBgyKyUFDB0+roDDB94egyPpAMEsNgkRtZLmREABybAtJgAdkVmAxkYnZAMyyuulLKqfBm+0S43hk9L53rIVYEh0554RaZuAvCAx8e0z8c6S/JqfUa57IDR+aGTB+S0f/A2+JGF/ettnwY/dsDgvhG/XQtksHbDQI6MDhPG5JzHgAEmeGPu6mlR4KEJ4x/Xk4
LfzaP9NcbsBJhGhmlIlyKbM4sBnlkMj47mZinHQ3AHGCEJYIckQ0NTmSBYnqHbC0hnkTFiDNDHKamGoSajZCIA/HE9A9TYDdanO38X/hiA3uWJy+1y/DLPOCuO9KzVaivq32oFaOCm3o2HZspisBLU3KRLi0tl165dME+jVFaNZLhcyq5abDDkXmKAzOYBMv0j8CS/fVjP6ciI5AEyp8YiLNwLjWJchSIsFd1si71yZ1vBDJCFeJiEqmzT3nG8TeDvyTFei62XB9uc2J93aQSYBCb48Tf/7cfyxutv6gvyY48+DL3g34dJZtWSJv/u0i7HNhsbASQCppBIuSivvvqqtLZYHnpLNSx8Lj954mzUzU8nUIMqcasgjUW/uZ21O1H9Co8KSOss9vNA1J28xxYksHTf/XuQDH5Ifv7zFgWCyYYrxzNxKZLVSymbsZKLJ1byz9zb2yuv/+agVvouVQTLb8x1OzyX665d0n+ujCNyCNIsDz+8V7773e/Kli1b7tCnn2v70SzP5CUBjv37n4YP0U/0H9/9WRVO5gSr64NZEfZ25wNQBPdrMdqwt0kZb5qyGzAlN7dYZb3sweQxmS7GE6Qflfv0TbA/qzLx7ILqBqv47X1kWxw3kzw27dKLhTJirLjn+F25clzZIomJach7TMCP4aImtA0oQ8Bk5ljOzOgRfP/g0CF5G1X0SxWLcQzbwfk1kHZ74P6d8jwYwvc/cP+yeCURnOI4P/rYt+TVA+1y+P1fy+dplofLlm0PKcvJSF8t1Tiadkdp7rpI0dPTOs2IorwVC5ODzxUem9k4jnisDg62I38woP43JgjiETihP05wIRr9U3iuU+bOBI9tMmjoNUKPECODRVCF5wblr+gbpKAMwD62PVuweJnsMZ4rSxUEcCj1xevpXCMFOT2OEVlCLDKl1CiLSJYyaGhuZVVn38pcfDvCtbRc7A9un8AzvUpy4leHBS+y4iuRkzw7nd8K1W8ySUridoWaNT1NwRErVTvrcrGZX44RiAoAGRv3BRL1PnhUWEl8JnXJDiDrw5XK5JFVQ8/q+uzqDdC2I4SApDPkgBLap5R9Qd+OifQEaP3BPAc6g/FjLmVWMIFtktlpGQ6tBqEx7RBABIIf584dggGSV0bciXINUldqOs0EPBIjzGVlQGhpENX3W4pr1CvjxnVU9AbOdCceDuAmpv4lvOCMSZL0JEIKyw3UHFJL/T0QMPL1i2vjmmkL4ky3V9KTboMoCVl54kOiNwleJgSA6IkioM1moFrClYI+jWBZgCw0MU9GMpym5gxzO6BxOWPKlSp9HQGgAd/7h1DtBzkti8sCsAfsEoJMHfACYWKZoWbllWB1JMerlFbcIDPtaVq9P+T0Ih1poZUjo0BCMcYG/NCVEWrMDlBlJNAe2/WCdVLpyZJLo10ACvqkxgMDdpjXk8nCTL5/0pInI/gxiLEhIEOgpgqSY5cCrJbk3DyphK8LZcNG8idVEqrBjXFHMQclxChZxSDwQaYHPT7oe5IJkKkoLVku3rwlh44cUGNzmw3JNPjhBgiT4YuHBFcCZMsmpAjMGmWwBAAQBRfQfrEjVfeRQfCIjwEq0YYgAwOQjbScvC4uMGN0OcwjiMNIZXuITABF5TCTt08jAMVjnOAHGSYEN/hP8N0AHQRUuBw/OU5sl3Ja1nJjOm4ETEx/RmH61dbWptu5G0GZubeP98inFy1wi30Y9o1BEi1Xzw1faCYyQD8LDOFD67Gjn+mDNplGwWDIYu3TvcQAsZvqDSqQ7NBzmNc3+6cX5+98wu4BMhSGSj4xkAAglufE/B8GCZDVHWvG+Wq1QXZWbmq6ApXOU30AWyEjh0qfaIMPxYYBYmmyOiDXhwPQn3CHaXv3OM/vuT9kmgdvts/xpZRiqLEfTIoP6MLOf3yi3e/YcrOPAK+RFy9ekp/8vz+Wl/6/V/S42rBxq3z/j/4IwOuOWHJ19uGLzbWNQHCVcPBL/EocrMaGRmhkv6YJ1IUmiZZq/+yVuCnJx6ADfkSryb/26JPy9NPfWPLk/FLt10psNx8McAK/p8+dlc8BgLz++tvQwN8kf/iHf7CoptbB+x7zAAkekei+p6IwkM/GyxFMdjMJPNfgdYWMyvz8Ihh+b9bneMrdFhcXSyjz4Lm2H+3yNGGmxwfNmz868qq+o2/ctFur6lldP1sEX9tnWzbcPNMGAQQGE5qUw2LYK97DrR9q+gA8QMi0UBlvRwoAEIpAzwwal1O+h8Hk8RhkqunTYY/kZMcdCdFwBt30lWClPdkdTU0HdDz3dneqDBblr+i3wmQzjaOZZKZPhT2CE9ScR8nDVvi7LnXwGKbxeXBFfqTt2o9h3nv27blfpYToX7lc5u58nuBY7t79hIJMv/3t34MN0gAwar2a25PBsFx9ScaxpsAYQDiyKuYbfCcbAphhQDwnrhPBIB7bZrI+JdmSWaO8+CgKToPDAQA/2uA4FUK6i8wnAiB2GSwaxdPbhvJXKuEGsM8u4RZqG7/7e99Tr0B70HTdj3NtwHvbIN3lZF4Qxclz6Ktp88qVC/Jn/+7/ifoaTNCDPkNkmtEYnZ6rZHSWFBWqF0hbiyUHFmp/lmtatOyPSKbqBFvIqlgOI3BeO3qTL0vXeKXky6Y7hkpBi9u153fM5wS9/gDYIPiD+viwQErIlWMTv7QjEBEA4QNROm7WTNQLNQadlLHKMD7ZyB5D8slhVduXgN1RWFgIsyGnDAD4YLggsZSQBx8NRCoAgEQkkXuHxwGEZOITyxSMSVorDKlZXY/IyqyRHbs3aaJqFMALmR+shnMBBLiGi+wlACAMAi/G9JrVbHkAY8o21ErnJFgoYFdQdioTSWkagxsPDa6XUp4jg3hI6Y0DajgFMIJoHm4c6SNjACc61OR7ZBh+IqkO6YbnCM8bb+YIIJYRmXAGoD/kMYfjksQfj7HBRa2wqFjBBzI/CHr04iKLJyBlkNj9P25BDuoWKo09nX3S0z6poAaDY0vEmreLPgBCecjDTSBZPzhkJf3GYabuLKvSZZk4TXXBKwUMEP7TgBeIf+x2ApWJfYIxBEOcaNs70qYAxnh6kXqdMJjc74SDPAm6LU3XlCHCUGYPXC2C5WXrLl6VjFWgl9ZUStVagEXYl4vQvwSaIq03uy2GB59ZA9fwJwse030l6PFMYp/kpKOapdcnn5+7IL8EUyfuEtZFlHb45GT67b4zqfpYwX06r+PmFQUWsMPqCZKNz93bqV+aigeNW8pIySsuULo5gQcyMvqvXxdWJW4s2qjHzfjwdbCNHBYw4cPhDpCCgAXZMQScCGi5Mwf1O8fr6vVrUlKQL20YHwb/zsRyDMPkIKBB8E+PWCzX47eYOgqIkAWCbVD2qq+lQVkkZM8wmNj7t//m/1Dg7m6E35cMebkyGcZvYw+fD54vePmIFCbxwQdWymTZwZB9+x5UA/U+SKvhB4vU1Kzz70UGSFMXrisAm8JFzxA0iJMjXo5nrG6xKKyHzDOtFksvVPtM/vv841KSa6c+hFpy5jS+qJlrJ6tcmIBJPXNbW7gxsDgr9HlsE
AAxD8WRko2GAeIEgNzb4pMztk2zv/ax8GLb7D8fVOcS3AbXYXtnr3dKU7vlXRSqjV7cb+Y6PqHaiU2b2wiw6t3n80K+shcvboPQxL4gL//6V/LWm28r84Pgx5/8qx/JE088uSxVfXPrfWzp5RoBUyEbKlkTqg8EhilvQtNOBl/YmeharoREqD5FM43XK0p2dhdX6OKm2nexP9n2fKsXuS4TUG48+/Nlngmoh/c+BCbITvhawSgdxqSxWJwRSEiIV+D3n/+zfw4fxl71QvrFi/9D8vPy5evfeGpJrokxD5D5/3bF8G38s3/3f4WVmJt/yzPX5HXt73/2d/K3f/sPUVUghzpfCXpUVVagaDBvScG0cPvMaznlqF0Z1vsRK3lZbU7gIFJEer6MtD7nsw27rBSTvmRw8L6B1HY0TShQYtqyr0BJ70jBava5RKhENNdnH8jqWAOvCcqJUQaLpuf5eYVil79Sg3ckmYPvoXb2CdU+0tMz5F/+6Efy/e//k7l0b17LHnjlJfnP//lvhLJAkSIY9GAuYPu27fBQgWR6YZH2e7nvPRxLsiHI2GEYSbpM+NyE+70i7edc5xtZKYJu7e0t8NVoQ50nZOZTbr+rzdZmuGN4tnU4jyDdYgZBwTVrtqupvF0Gqwtg3pWrZxWUIchnyWrdloYL7kMicpZr162fUSzF94wvrrTJG8fqg0zQs+T5h6pldalVtBzc1mzfCahECjvoQbCZeRIDegRfdxOSbgMzkdqdz3wfZO0jxWKwPwiikJExDvuE5QrDAnHFl4T1AonUFwI2A9Ik+RN3giiR1o3N/3KOQNQZNyabBUn5hL4kSUPC2OGdkDa8WzIZTKkhVtsz6Ut5JT+AClpEEPzIiPdLYn6OjCe4JHHCAi/iknogi5UuWXlF0tfgRSIZCenA+FVVrhUnaHe4SqmBuBNV9O4HdstICczPKbd0AdOrrAqbzIBBd3ZfvzIRymrWyLnOOkj6uJHCn5AhejgAmOhk3hFJ7ML0VAUo+h1IeuOT/Zgaw0scKox7AIpcTPZLwTBAibYk6a32a/8FuEN33XkZc1sPZalpLplMouF6ngwhWZ824sBDHLoLHcNe3gz7gPJCHmsqoCXK3SIDZAS+IMacndPSlauQqkbnlK5iBb5Ox3MTL+A0cuLfSUj+TaVYT1kEdeLjgayiojspcLEkCBI/gWT+sJVczYAEVBaAgGEAIExqq+9JQBIrcQjoBMaBif4+MGrGirKkKzdOUjuRtIYEFhkfE/jXB5E74yPCPpHlkIHffv2+LXLK2yMHEo8pslu6tkYBo63D8H6B9AGjsdD6jV9AVXUJHgIcp3vlQt81Od7SLo0tPdIA8KfLBnhIXrps7nYoaEBQIR0MFR5r7IsyKBC9IUT3CBgBd5NR3OyNRJX5JMuF4McNgCEMUgzZHveL0lgJ8PMgq8ON71y2h8cx/uaYEPBqamvXbXP5/hGyPGAZD3YJ68PJImE/C3Dvp3QWt+kByGGkwrgelyFYwn3iCUbQkL8Xq12OnzijjIq7FdYN+a3bxnPw0PE7eqLWEma/TbWOoSDTXI9arhxnRnFB7YJ2715igLACbHtBOUAwXL2gY+f1jUs6rjX2mMALaDYumOW4LoUzdgs1oHz45ovPk9Wj0jKF62igbV96r6QMWQ90Q6TS5yYCUEiQmoySkBVpodrmyyFfRvlStWXLTj12+QBalZMlJXnZ8pMur/wwxylN8Pa50QWAFsuSGVRWtiZidQ63x75zbNZBgs65DgwV29h4xyEh6LAe3k3/i+LgqQOd2OCXt1B9N9O4LA3nHquqkgYw3exjb8ZI2wcIU5KfruMTi+UbAb6UtLa2qGHh2++8rdeQxpstmphl7Lpvl/zxv/iRfOMbT9+VBM3yjcTK3BIlDVh1yhfsuwUcEJxvBeDeB/m9NLwgFwLAn00ahcBHe0er1F09hWTATU0GsLrPDU32isqN+vLMxNBs+tF389fYtn2b/Pn//e9RGWz55S1VX5pxrv3Dz342J5kTexKVL/Pb0dc9qLylJBOBj7uRgFqq8Vkp7fLJnwU/jz/xuJy7UIek90+0EOE/yX/ULj7x5OOLfm2MeYDM/9enbNnqNWvn30CUa/Le+e677866dCjQg/K1K+V85bX69KkPURh2TKVtCnCNrocx+BdfHNJEvd1TIHhHTdI2ePpcv/PeRqmqgsJKOX/uMzVmZ8LVbiIerk32n94ETIAHV6SPjVnXbzI7wt07yRSJBigx2w/HAOGzOsH9tWtrdT9YKV9/87xKjBn5K7J9jHF08P4EP1MTRKhatSp4sUX/TuGtL774VH/7cBF8DBP02LN7zzRwd7clF3kMXLh4XD75xDoXadLOd6WjR3+jXivFReVzemcJNw6zTed7FHMQGRn5yPlcnPbksJuIh1uf/R8E44NBuS4CgsHABo+7UMfwqDHpDdf4HKezgK5m9Tb1xzl16vC0uTo9ba5fO6HvmHwf5TtvJAB0AvmzxITbxxW9TzORM9pQ7UGx8G22VQYUV9Lxb67B6y/ZJHwnDg5zzJKtQjagAT3u5nVXczZIbRKcmM3jwh+H69ZMRbzg3dPvNEEP561BECUSQyRkowuc2Ou4LFlggTjiU2fdx3CbUSZJ3OxSWjEPkHCj9+WcHhEA4YWkuetqgBmA5DyS2R14UJnyQ8MP8hQ0gGZ4tq6HP4XFM4pHpb87CenlAABNxkdW2oCMA5VOhNQSwQ9+8vvQwO1qD7I6dj+yXzKcDvETAMFNnYyOj6euARFul3xfPs88DUe2C7L5AQZEOQzLM1zystShZF/kCqASjRz+jywLqyKdKWyyEhilUoj/F0J+KR2e7UOS2uRFGyCkbCgXJySweA1oBejCSEVikgM13DcgHZB7GmqPBwiUKm5cAFvIImHgTSVrCllxSGs5YKTTBFAjDWAQQQsL/ICISx76gdxO7ijaw7WZ4M4wkqEEPwh6jGVZjBDAp9MgyBjpqvgNGGST+NrSZTLPEtdiUp1SWPSyHpsAqIRKmqFBeG+00ezdGigCBAwj90QTdGU+IKkPKEji2vrxO0LGBlJSzkJ4bBDoAqjF5D/ZPAnoK1kSmflxcg3jcya1VdublrUKeHmkZmTJqox++dmFRjmdMCldn34kVwEaGNCD61zrm5I4SJXlYpeSm+LB3ihWwCN7k2UyT1BDAz4ryL7qnwQeaHYOREa6AaB9drJFDckJNBh5rqSeJNlTu0r9VzLqgPBD4ov9Z1COasPatTougnnJqFrkcZtXnK/HNI3NTawqrwJgZCWOCYwk4Pfj9skwYhD4YBDsSAW7pxx/9/cBIEE7duCI85vBCqFMFwHC4b4s/Z2YIH7ssYeVjcJQCTf0ebk+hwDWpaf6IS1nfXK7VlVh25wpyARScnLyoMlfpB4z1AEn8+ajo59jX2MMEP2BA5GKa13JeLb0j14AxRIsBBAxeF0w7DAHxPecqTnihu/RXKuG+MBKTeEkf5P0+25p2xl4NBGXxTLKwKOIO6lAMtKKtQp6LsEH0n0PPie7+33SMtwuv/f5e5IFw7te
4ald9I8Oygmn8RBXRL1Sv59JIMQeor3XRLvp6VEDr0BYSkkNkri5MAP2BqfmZQW4cAWbrUlA6yJnAIK+6qmKI2aa3mxADQqgwflwLOCYJC7kvwh+noWDdAPwkeID43ZLwu9cqFYQ20UolAeHGe8rWTEpsZICzCZ7IZNT035snPjw/h2cJOjjcGR7c1597P/hCoYrConQOA+E/8TPDY5RvhKNgPOMYHrLaS4cBgUYdBJshP/vuP5P1mUf4RIEJp8JSSvcpfeUdM/NA7QFggM7Ix7sD7qDSRkQsXKN/Q80B26wBjcxUyo9zwRSLI8vdXV+X/DmuGqJHb9+Wlnt+Qnp6DnTttcIC55z44ndQHb0ocMnkz5gSM2BNgXm3fCbyX3bHA24nzz5y5Ip9Bnu/5H/6f8r+OOGX+xk9kxGaV/v7RQx3fdu4pb0bZx0Q9CVPYGbnYKsmr+CxZlzzby2Q3LdM+d5gf/k1mye3JBZnAd88fOH3wIXIeCmDZakq2HbTWt1r+OJ872LvlOGesb1vPwLOXAbIOjiIOwvxI50ryrbdf2BP40Z4jmcR66BnQM6BnQM+AngE9A09HBjwONA7Dl9ZhfaB31R1yQQXj+L/vdQbI03GO6XvxZGVgT+/8xSZMaistJXG1Cj8QI0ydO051gnFQkulfwtjcW5Brv74nNr9LgQbTqFV2+E3SN9QrjZmWuEpBmOMlJWf3iDF7R5YhicUSmqccoU+4ks2ifUU0qnW50iOEUYZk0s3pFXnnj39LyW5xLEE9zIrtlmvLqqM/NAbQBF4YFp9T7GW3lO15qbWSYIh4lExWI+sG+AEZIgA1oVIdBtAVpUXc7OkBywJd+vW8rIAKUTOUZeS5QfiVZCBVRUDGp0AQPwCbn/7iPcVooMxTzQc2QwN6/ijKd59DF66rC/7nkITCtsZOnwbV/w6K+QnJLt2X2YZTLsg55UUBHgJQA7BocjNq3whyVKwGgBRpidr9YJ4MSg3dcoVqHqaDThRf4TUCUGZymeyPOQWQUEvZi3nQa4PhQT4tKOAn8Fo5CymnkZdkuKdPSWFFBYwFsDu4r8ux6zKzEJMo6pQFIFJx2zyYIIQdAjIw1g12hl+xHhgLkGvylUri6wmie7qu/qbdPIENhmJvnI6Iac2gnc/RVJ5BgIIGiL2Y9/DwmCTB+KBMGNflejUDjhOWS6XvQk5qTHoCQ9IN8OTOxHXF8OhDMbsZR+7xr/PiGYlPFuTG+7+SC6MBxTZphcAmwhiyhFxASqvHC6ks1JdT6PImsybSAaBtTTmHfir1DpukE0lapwvUslBkhTPJosaMISOlPA29ZwJFKKjXcVzJzOkAOGYGELOIvDLPNKZneEYuKIktmtMTQKKhPNrVpcc6CrNzgFHSqeTFYpCYslXviLvXIqGLb4l7ICqOigbGqYGOMWgE1h12SQTn18Ywmwwo1B4NA8Rsc+KzQgMrD7qrmxkglBkKhzowR+3jajWxIuNreuH13gd01oNuz4Y588aCPNkft9OaRFgqV1aMjcN08nPMLvj/vP32/6Cmd/Xq+zI1eR+MloKcxmfLN8Mw+wS4cAEyCgcFWTi/dpAxkABjbNEEJl0pJ11gt5DVctBg7l948R3FxFiJL8hnn/5CPv/sIzWczz8OZt4lOXv2hQOzEVjYD/g1QK6d/xkz3IKqdbmfj0so2HlgEIH5DIe71fxdYM79XeUn8h8APN392aTKfxbg7huv/66cO/fSgYAEzrede/6dwYdMYq27iPfFcPZQ4BzH3JgbAnOlNV30O9lV9Z44Kimsg557Bz2vjmK9A2KSR7FpfQw9A3oGHmMGsnuUvdrM/Ngv+PEYd0nflJ4BPQN6BvQM6BnQM/CIM2A1wwQdMud/+jsXpbKhTxo9cFsaoz/i6XxheJ0B8oWU6E/oGXjkGdgTALIQTykAIQOJKR/ADzIyaIpeB4OAhd6NMYWCc7vIX0jb5PzQsAI/uDzZHHfR3J8xmOVFD+R4AjZZgCwTBHzVEEUwAmzLdgW0MBLYFmWxGLlaQVoOMAgwjg/LV/HaMFglhoBdVvNpCeFDbSU2pZY1DmgSRx1GrKtqg6NKqmsOJuv0Ibl4NixX63fhR9GCYTvo7vDDTUJqS20nNi+eaC/2EfJb6OhNp5MoctVR8J4RK/xJGgsluXz2qzKdXFHgRyAYlLySN5pV6w/0+GRmMSbus6cAfcADhBSOBAuqNnGFMUnI1OSnltSyoRG/DHSPrTMVImA/xAEWcDmnt1NKKOK/6LXIJOSdOLonALkZmF8MjYyAPdMtH6AgGUtmpR9FejeYFkMAEBLYz4n3P1dSWc+9+paS9qIpOb05WKAfvvgC5H0WZBbsjIKtKJFlg5qjMWVU0ldkg4BKA4SmDnkoK1gTJYl09SrPjGx2WpqBpiyk02C1QA5ryK28S4owdCe7pYVCbldXFIXAtEQiEdy+KguLWl76nB61neRcVRLzS2Je+UDsMBikXJXaDoENxBTYiZQQY6QlLxGYl/vDNsU26bI04MGSVD4kgR6/Eu6ir4sbz5tXb0urWZZUl0OBOe39qZUgl4UxC2mLDIDJ4Xe5xNMIygIKiBJ1ywgAGLI6aEqfANuDYEgOEr4qNwAAIABJREFU4A5lxgg6JZduC7sIL0MyLDo6ptgtOZjGk+VBwCdjqUsT44wv3RW7iTAezm+AbOW5Fentb0qqtCIm38noVSZj4ubUqkzHNH8aNVlEFyThXhgLi8t+OJkuslwoC1arJcHC0UzW29vYz/1mBgi76NtFfAIULP6Wqg38kDl8kZzz2ihTVC7Dl2clK6PdYIkhX9dWF+Utf+hQUkweSNl5vQGAo1cVgMDie71RlwQYcT+49YEMTt08sBTTxrlTVoTgjQOfR1/BOXkPQAjlpQ5TJGfuh4fGpARpvZ+//zcyhbly7gzO/7PPfi5feu235NTocwcGEdrnBo/tBECboNshfaBhzQFEyPiTkKLT3lf7OYfay3Jd3qam76q5fjY+sT5/7sv16+/LCJg4B81RO/9KDiuTVADFxeGIOodWkR8e+8PExvHbx/blM11q/MMe243z4rlTrZbRLECPq2N2BNxjwk7Gp+rWk+UFTRPnhBEg1nbdXXXoEtNjTY8nLwMmsLJ2M1x+8vbqZM74IODHQZgfJ3Pv9VnpGdAzoGdAz4CeAT0DB80Af4NHIcm+ufmTv88NeO24Y7trhOOel759PQNPcwZ2BUB4oXduqBvSUybF1iC4sLImL3XK3Sfh1y7L8g1NU8/hcMvbbw5La25Q+X3QhDsPpkM2qPloZAB6wMFBxvwDMgUZK1ljejSBPZAdogliYRtghBDo6AjBfPq8XRmvExzJsZaE572RkLo5jShS4vlVlMoTlrLyGSE7hWbtDHqWcBx/LwrcGZO8j67xbOumvPOHV2Q01SFGax8kuYpiSA/Iqf4+7Ti/cl7dk1ViQLGfnIeeUgDSWecAigxJJQ3tI8TY8CC8R+5LuhCSqCMKlkEIIMstqSxXwWYBM+VSv/iaFQXWfPZBSUwooLt7IbkEIIl+Ib1DXfLy2TPI6
zK4ETbpge6wv6tTKkszYlmeE2OrIhVfVlIzFcUEYcyuxOU2QIvL8NsIwAslUF6WMszca+heb0GebPLOZ4qp0hnSPB5mAdpcvXFdHMWyjMG0m+BHV3cXuqE1I0oW7Ql6NE1NWV1aVMBDF5gfjKUF+ApMFRRIQ9ADLittT3uAYGacC2CH4L7lsokzZZE+zJ9MDDJOSk0Y3ZsvQrLFgQ7mDiVJtYrnLR2QhYJcTquIAhsK5NN37ylwpSd8WoFp6XQG+12WKCTVsjB373oeth4zDYAfFjn90qvK2wSqRGKH9ldlzWS+3pjDXBuQIyNwQtDKIe6GW3l6qPMp0i3Z5VXIe9HIvFPcBg8YHBWcI4NS9BolPbGkQBSP264kw+p++E1M0Ogc5xdApM7QJTkPkIQAUMpal04Ys6fKcak5/MogkWb1DQBklu4q8hqRcmNF6sZRHNEaiq7TQkNNiMHJy6+8rPJ6nJEv1eSDm8vyo0+mH5rGmxd65XS/79AACIvlHTCqz2RvqyJ/CP46B4nNDJCNY6TSCZlfBUDRAOsIx3x+xXfoInl7fBax7+fmFYBwxhvGMa7IR/FlnNcJ5a+xEWzY734RQLgOLw0W3eljEgXIWgLYkgHA+sknP5Xnn/+K9PUOHpgFwrnTy4TMA4IHvf1hmZtNKxZF4JAADtkBZN188MG7ar4hfD4PhwMKTLh9+2O1X0ODZw4FEjGfy/ElBTqNWM0wXLfLvRn4MgFU8IGpdxgpKRb3b9z4lUwj94wrY2NyFyA19+Xzzz+UN978PentOThLhrmvgPVE8CbkccopNzydynX1uAuA6kHBlY3nWB4gP49tCJ9jHJ9/H8Wx5TaovMZzZ2VlQfyhXjGfAL+ijfu+3d8nmQGSSiXxvbKEhoAuvF8eeC4RGGFTQiqVkuXlZVlaXELu6+o7klKO+v3Jz4PT4VKNLwMD/UKTb6cLvzeOiEG53bn+rD6/V/CD+WkbnvNvnfnxrJ4x+n7rGdAzoGdAz4CegQcZ4O/uWLIgd6aTUKF/4D/mdtrl3GBAQmtNt8eVM50BclyZ17f7LGdgVwCECCnBBnp6WAFgMOi34UitKOCDslc2PwvPWnS0emQ8mJJW2gDpo7gYuo0Qk3JI1komRF6qq+h4xHgdGCppbEqwmZGVZXTpS1wmwBBhtFowADeQiQBjXEhfKfADDJD4zRmZn9WYJCOXz6nnCYKEagkFgOT6YKgOIIPrKHAkAjkYmIuTIULg5rnvfAW+FPfV6wIzaCckpYqQRIna+1H0WVHbIHjC17PxhLYcJ9SL2WFuBHiqGY1l0jd0Tvy1PjW2tRYHyQCADTxGpH9NHx/67C2wKhhvXHpBAUErQJoJgDzf0yXhxoPisHHIK7amC74ay8o8faGKgkkV6BFSQDGhXjVmW+YmpEAW+qsMesckMpIBUAImzdUPcFE+JEOQcILThdouJbzclnNKour88BDAo5QsovjN52luHi57ZHURufdrOvuv9l5QjIY0TMZPQbZnOnNPWuEBGFIm1XgMvxUbVn/wmKF4tjojTZisx40FMfvqUr27IgvWhDSnYPyOomCgd0ixQRh1D5F2mNif6xY3xhmEDFURsjFkVOSXTOiWdiq9/twSnq9llR9K15BH/PD6cFofFChZZLdUNbaC3fUVWR6YUyAHXDnW2R8++Hv0gD3ihpTYHbmqvDgKMK+fnLwrfnSE1wI4NjzdIJPVBcEwvwd8k3nkGGkngEQpMfqHMHpQyM9aPQBQAH4kMM7kMrrfwToK+SU6EBYPCrRBL2hECDO6s70VTaKM8l9pyJCRgXNSgoVZMifaPwJsFpMqOh9FUOs7CgZQHtJgxWLmwMX8zQyQ9txYyGbRl/JIjrBm9r0AmarpUuzQRXJug0Xs+eW8OMGEcYLhVK9bAYYkDy3FxLEJIHz88c9U0Z3gx5987bzcGE/J377/S7l35yO5f+8zSGX1HarQvwT2GhkIrgCYWHh/EEi4AabAWX/2UCwKMjNu3fxgHUD4MqT/3nj1lCz9RUVi2Cb3izJTZIocVEqJx3aGn3mICNgrnP+KJa9AhHDxcFJSieSyApk4Vx9kx77+2qj0jofk+7/8SO7c/kjt22Fzz+NL6bGXI5AesztkxOGR6yiC8/nDgCvMRxuY498DVr8av7fTLffnU4c+tirhCHp9JZOLcv5cD77ntO+w9msn9f74+7a2z0y5DG8xfN/U1yTLuCQfT05Oys9+9h7Ouevw5gqL3YbfT+Th6/FEZSCT/UQsplUZGT0lb7zxhpw5cwa/XSDxegK6CZ+oRO4wWYIff/PXfyM//eH35Bsv98m3v/4VXBJs7Sfy48/G5Qc/+LFi6+rgxw5J1V/SM6BnQM+AngE9A89QBqrwyiX48ef/eFPVP9pBVsh3v3nx2AEQ/XfjM3Qy6rt6YjKwKwBCGYcpgAgMSkr1BXokC8Ahnowo8INBaSuGzxWXFXUPGSBwInyuTsXAYBgAgERCA9IyUmoKBre9AQUq3K255cOfvys9z1+Q4OINCXe/CHknF5gWZIE4JQNNJMpX2VNLUg50SQrF1dTNHJgONiUDJXJNsSwoZ0VwJQ6PjRaksZzR4EPgCOdAcKTz1GuYO6SSUpzHNRR7BmDg3kBxTANRCIAQCGEQBGFQZityfgCASEMyMQBAkABjtFJlCSSy4j3bIyPeHinkS7JcmVsHTrICVgpkUJzBHom+fVlMsykxwHeCYxHMWZmCpBMAkfEb9+TyqRFlJk+QhT4q//zzH8jLF4alY+BL0jnYL6FKU0xdAJEQWTAoDASNUFdfBPhkCNB895z0mCNqXYbLD5YDDM07qK+PW3YNpGpl5mVl5pZEBkckCpN5yps1QoPy9W9+BebSKGhWsIO5iIS6R+XC0JfQvWyTiYlxMYJx0aimYNQMU3drh/QGhtV2FkqzkHyKwTcjC9YMTidIeFmWFlDo+Vy9PoYbCwPsmlxAUbx9whFk6W06sLgNHf3gSHTBswXoQ62aldjqPeUVEjX40cvegEdJUQqFW8pofT4O0OvFV+G7kpDi3DJYLeewJJg8bvps2MRnCMCYvEM8/ZAJA7ulXJ9R8+jsxXaQNxeMhJsArWJxGLxD6uvtsa/Kij0OEIZgFVgFMKc3eMBmWQM/eL7ZjShQoUO3WqlI0WGU6OWz4gZzhLJZedyTWWK0wCslPqUkTSx4TEP2ueqMkh7zBQdQ5Nr6wl1N7jGF22GR1y72SBCA2cYY6QJQhteOItgR29XtQjF1Wflp0Dtiv7EdA6SIQjilf3pKiyjoV5H/pvp7ftkjpzx4H0Jm6qAFeBaZWaxmsHhNc2ve2HHPbR7Gz4F5IMDBYjvjynC3fPu33xL/hzflJ5/flDkwtchQuHT5DcWaOcg+1OtVBRYMgEF1zq0Bmr4w3m34KDssQETviV/88h9ldTWuAIRvfHlEnj/TI1dv98hf/MO82i/uX3/f6IEBHOVdssZeIfjBcAEMvQdvn0w2c2AfFh7Xmzc+VCATg+DNt756CZ/5c/LerXEFinzy65/KSzChP6jUFnNPWTYyh+g1oubO
ewAgfL4z0nWg94EaCEFgLpmprANzfK7XEpJ5AESUxXpVMWQOBwcwxysrMRkeuSJW25qRUnsCJ/T+JDNAwvCs8gB85+c+u7tWwCR795/fhYzc+/jegTzmhe+ATTigzgurVQdATugptuW0qmhq4Gd6OhXH75zr8md/9ldy5cqg/NZvfk0Gh0d0EGTLrO3vyTb48eO/+287gh/0/Lg7Na/AD4YOfuwvz/rSegb0DOgZ0DOgZ+Bpz0CuVJV0vgJ1EU2RhvtrQ5NgFQzs4w6dAXLcR0Df/rOYgV0BECZlCKBH+AW/pOeNChQgqHEDxSN2yROEuH4TMiu42A/ZNFYA5axWmmACTD3oLA+n7euASQV/p1HuJpAwf+uuAj/Y5d9M2MXlqygQIAbZGRsMyWnojb566YHZOYv19BzxeQdgQ21XclQLKIKnklOK/cAoQC6qA/W/uxPX1OOMEUXxQlAtm/YbARaU5fYUDNtRUCJzIpBehr59Hq9D4khaYvanlOwUgyACPUEGB/qULBdZIx0AVxhaHpxSKXdIDhdggs3P4Z5gEAEYRnjRKCvo8iw6S1LJLsjy9C3xRS8ij1pRnstwXy6f0gCVtrzWgMerwI+zY6Ni7gL4MTAmheV5KYPCFwnaIa0xK5PXbylmyFQmBRaNZmA+nb0LYkhUuD74JopxQkCExfxek0PlezUNJgtADQbnStZKK1mRySkYkQ/geeQrCK8VSw6AUxLcD6zfjWMcQ3M2wRoBCHH5lRcUWNLKxVQR2ljzStDQBbClKR4/WBeQhJoPxOHXYVdG7Klrn0hfP7xWrFgfnh9kkZCxsZwua74oAD/azBKCKNOYmz0Gb4/nx5T0lL/ZkLC1C0jKkgwV7OIAk6WF4xeA7Fa6ZoU5Pc6RABg5uA8ADLo5E5PZax9IEgVUAjeC3N+ZKatzzGOsYj8yil2ShrxWdagKDwBIboFVko5mJBmbFHqxFNK3cPzPiQFFKspz3bqxINnJCRROtQJn98V+cULfP3l7WvwlnKf5FbBtMPFel5g8NkkVa5h3TZbhI0Kghh3uxx0OfNlfORuR50YeZqTwRwBNwo4iWPQbHuqRhflxdJOjmGq17bugvxUDRAMo4gBAyxJaY39YrUbFBBmHJN9hiuTc73YRm+yPdhGbz7PjPgFwi9JYl5zefRf4KS9EAOFXH/zTOoBA9sR5MMSypzrkpdOj8t7nVyHF9M/y1a/+wYGL8G35qC73A8+JwSY+h1EknwOr5VzwYGboPG/vAdyYn7spZRTiX7twVl54/qKa/5uXhuWHv7qvQITr1z+Qy89/GWCgBr7s51ziNtr+GR0DeH9p5B58poOBk7coEIGG3w6AqPsBh1Tuc2kFLhFkIquHub8wos3x0mCfvPtrMA/Hf6328cpL7+z7+HI/eXzJ/jgFUJ/sDEabpUFGESXUDpKXdg43sksIyjFskOMjONc2W7daNd+r9jr7ua/XkX/IFzKinR1PjJzP4SCf/WRo/8sS+GiD3jMzM/LXf/3XcuduSk6f+pqcv/AKgM5OSOE9GV4r+9/7p3cNgvP8fuJ9tToKX7EzMjN9Rz76+EeyGPtL+aN/+S05cxbeZrok1oFPgr2CH9xAW/ZKZ34cON36inoG9AzoGdAzoGfgqc2AGQ1iA2j0/NpLg0qeuB1sBg37tMbi49x5nQFynNnXt/2sZmBXAIQd7cqAHGyHamb5/2fvTZ/bPO9swR9JAMRGbNzAfRMpSqRWa7Mly1si27GdONvt5KYnNT2p3Jmp2zM198NMzT8wVV01H6bq1u2puj2T7rmp3E53J7GTjpfE8SrZsWTJ2veFm7gvAEkABAiA4JzzgC8FQaQIypRIis9jqwgC7/ss531JAr/znHOkqxvFXRTUh8bS6gi/DRkaCIMeKRiX9zvGpcVRD2IB4oS8lFjK0sU4FpBN43csjPLyhtEXQsunZ6AAsErS7lXqAWoXWKQ390GFgHqssmqCioK2UEGQIb3IhTCNBMUMEmDCYVc5HFVSp8Yubq8VKwr2VKVchv0Sm6XELBYU9I1GgqErP00WdPTdlnZ3u/gbapH3bUYeCEgPKE1Y6DfOCMwi3wGqE1p05U9DTQF7L0fMJ/2pPkUe8PW4zy11kLz0Yvcy15nvweLneJ8rMiAlSATpCl7BbuB0wDrP4XqmUMBHOIfYfCBoUl51bjAwipAmh1TaQnL5NpQnV/C6PSIFULUMQFXBdqUTxBCL/yB6LPjlvZmB9FijJwXMrC6xRXuhggF8yN4wyI95APCARX220kqPyh9hboq1zgXLMZI+leIvRHB8oV8d0w8LrI6uM7INRFBFA+y1YnWKLKKqIlEQkAiUEeZURPKKkT8AhQSSwaXElichi1c8VhMIB5siU4YDKQkgoNwL8iMCiyySIk4X1DHYaIzIEpmYycO171eZIrhpZFPFZjndEREPrLusrjJx+v0yGUZhNA6/dOR8jOIr7wXPDKy08DiG8YjpIO4VXuMYip0O5KSgRqHaeBjnBEMykLoqFfm41rDs2lRjlaJt25RFFsmcqeRt6cFufBJ5QRiPcU8u7/MECbgQ8kOQsWIdD0oEuQ0z5Xni67VKyAe1B44bocoBx9FWLIWw9lBoTCldKtp3S+jUcWS3jKHAvgZ2GaB6HEEOyETGDgji44D6o9hlxY/cVy8psui3uaUFtkjvPXAI9EIKEO6Cp6UQFR9OBy330oSNE9ecz12d9Kki+YPmLbBInV3EJjYsmisVCArZVJksN9SaxWUWyC6cOzpPIDyzvQoFbLNsaaqRg9v98ueLV6Sz48Z8IPdyCv0khlKp1F32UULOeK5RRRGFioJF9Aq/ddkFfmZDnDt3TM3PVeRW863yoriL+e9qrRGDRDiF633w0DceKCuFyh6qV1rMeULSxmhUgrQ67XIG83/QwO+bNy/KpUunFMG1DYTHbpBOnHs1SOpXnm+XU9dvqrVxjSTslnt9Sd5QYUP1B1UZbKlUCAXSIvX9jUTwgS3UMq9tprqEY7D/OktKemEVyLB1O8g5/HpddiNJRPyvXj0p23Fti0seLLdn2QOvwAlrWQFiLK+vr2+e/Dh08JvS0rJ9Xg20HDJvBeDSXawAAjMz6b87/IrIK0VYM6PI4y2TE8ffll/+8+9Agohs3dqmlSAPgHeu5Eem8kOTHw8AtD5FI6AR0AhoBDQCGwABWvm3NxRLg99IGk4v2oSNn7QBX+2mFSCrfQX0+BsRgSUJEILCrA8WnZk9kYcQ6E6ERvuralXexHAYxWx/hVwOjKld9R5rXMy3x1EktkgpSAIGn3ulSHry0wXgOncxisvFcq3ntMLbiaI9nd8LXGEZ749J77kzqkjvh+rgyz+fUAXpVF0zgrDjilzpAAFCxYeESmQadjHMH9kF9UG5u06+ZKHuagyZJbCdQHC3M1CgiAEr1ANdgRFEPkQkNYvnEZJN0oaNxe/A1QtKDTKBneQMBp9E8b0wf1R2bj+kiIY8dwWUG+XSNwtmoxBEzTTslPBwanIQde8YlACtMtZ/U5E4JDSGBvuVWiKMg9qqSDA0YaRiFEB
nmNss1gb4fgcd0gi1QjB4Xc0jMpm2wXDFp6RbYJFRUyRO2GjYsaN4cPDGnEIlTQb1TcKqCS14AfkUUEGkoNIIIzOFWI7ZfDI70CXxSackHHkSQfi6A7ZUI9NJdT2Ck1G1pq7L47BzmhEnskxMrjiuSYOyGisAmeBHaDJtyqjuOdTaIjFrWKpRBC51+aUTNl6zjhEU0akiQKB9Y5EijQRKC34t8QTSOSJesDDxEAgQBL8XgOVI4fihqLpX2hA8xftpPB/5E8gA2VXnlA4qZxI+Sc5MSWVBoRx+cbsMBgvxOhQcEN/MxEdglZUmwKi2IXGGFBnMF8oStPOBLvV1BpZaXF9lXYuy7xrqOiWXrtzA1ma/bHEVKTLE7rKLz+eE6mFaem99qtZMey/aX40UpJUajW3NYgKG3f3IDBmqlMrGCmmY8ClSxJsPay4QTNypbq8sgf1Vg0xPXVdhtlazSYKwGQsj0FmsUXF57uzIVxNcxUby44PT3XLqavr+MaayvalUXnuqUdzA7qs2EiDNLc24L0/M5VpUL5uUyFaAGPZU/fj53ATFB5UfbKk8Kx6DDHFPyc3+gGwvmcCua/uyw8qNbJHMIrbqH4Vss4mFZqhAkPHDQrMTFnnLCUMngcDdwUND/fMEAovvbOUIXtsJpde2+psqUJwh4/sPvCIN9ZvV67k07hwJIlck2z5qGvlHhcM+paKog+WfYcW0nDBx4t5zG3M7/ZkiIBge/vyTW9S82aikMEgEKixIIrS07FqWjRfHoHqH5FNr5R0btsz5UwXyIIHf8TjsUa6dlOvXTqvgeWZ/kHRi4xpIhmyu9CvsuUYWqNva9uYC+/wxypYN5BhJMpMpPX+SE2xUaVSX4ncyyPEJ/F5crsUW7zPmlxjh54a6xCBYFDmH3CQSgxX+Ozlcy1kA33gPDPZIf3+XvPTSayCh187vq6XW8dXp2qVG+Gqvs5j7p/f+pJQfvLdaW3cr1YcmPr4armvtbP5Orcf7DdqZffD+P8nv/vWPIIJLpBzvOXTLHYHlkh//5Tfv6cyP3OHVR2oENAIaAY2ARmDDIZCHz8nTUH7EEIA+MzO3MxYoFECpW5CfD8Xu6kKiFSCri78efWMisCQBUjBX1Bm8iWBrhNPmo9jiLPLAmHxKOscCkkDBcdaJojLyFNzYXU+SAfEf4gIBMJ7vlTAK254oFByRIRUWPY4d29HxaWX9xOLyzdsjYq4sQ5HcpHbpg/EQT96gXL/cJfl+twQTIAIQks3d9WAepLF+lypCU/XRh6JbYdyisjMGO3tUHkQhivgsrIfxHxuL5rTTSnpm1M7/ifEk1AbjUlXrgY3XRWmbuIT+oHpAnZ3lq+mubrxaLC0onitrLdeueeuuyRkoSpjlALFDdVmpDNoiysop2d0lDFNiiyfH1JhqLWjJCCyBYFFVmg/iB4qUjqvXUPCzymZYQjGHJK38wO5nLwv2W8UxEZI+SEgmUaBPgGzpn6Il17iyAsuDtZjYQYKgwM/vFSZoVMpIIA6CipHpIEYwZvvWNDEQxDEkdoZ6YxIGEZKssIndVKiIGgvUPR43xpkNKPKD8/Y401kffLwJZED1zpfFX7wNWRxQn1xPB8BbQPqQR/eAdCKBhNhrrCMg5jyf6ouqj6aqKjUXtlZTemd0FFZbDRV14gUZM9t5SZEvnmYnMlxS4kUuB/NXqDQawnXxljETpRjqljAUMtOSHLosvolxsaHQWFid7o+ZMtVlu2QiFJai/n6ojIZRnB6GiCRtcdNx9gKwKZPWpw8q7AO4Dxur04VC5tnEgyNigwVYcSGuFRQrpTu3KlXS1K0rMn1hSop2HZCa/L3KRkuF04N4YiaIvaZc/PaUXOvoktuQsFh8XcgWYYB7+noUm1zIUJnbaWBCgHzybsupeWAe8QP+8e/sC8nRc2k1EYe3mNNkwtf31kFttTLNj8LPvifa5c3ffiZb2w4sOwQ6WwFi2FNlqz/ykXdBEqTUPCV9UD08SJGcK2YRO7vIzOeNQjY9+mmNRZVCNZQEueaaZBMIVFB0j4zL7z88NQ90D4gbtuRMUq5cOansmioQkp6rCoQFbCoQGH6eaR9F8oONKoph2GJ1g2CgVZMXeUC5EjjE/fSXH0gnflaN9uHnV+Rm9x0C7dyVjvT8cewXJ/4g+/YeWVahPzM/o9CGvytz6pXM+T9omHtvXxesxU6k1SP4O7UY9lwA10gVROYO/flFL/JAWXeBvCH2dSDZDXsq4/BMlQYtvrhTfDkEFPvvRd4W+292pn+fs2/jvuR4zKv5IjSkFD4lpch7Wkaj+iMGRduF85/Jpk0uWAE2rivrnrWuADkLa7tz58/Jjh0vq/uK1/5+5AevR3a73/HZxy73+4XGYx8rOeajGGO5634Yx/N3aoW/Rp586mX56KNfy7Fjn8lr33x1TWR/PYz1rnSfuZIfHJeZH5r8WOkroPvTCGgENAIaAY3A44cASY8bvePyx+PdMhW7Y83PjWuvPtUgjZUrVf14MOy0AuTBcNNnaQS+CgJLEiC0wOqeQtD2nGIiZZ4Uc2RWZmMRCcXiMhwdFsd4AtZE2BUMEsSLqsTwQK/YN7ekd/5jdh+imN6USkqeGTuVb6Sp1nEoE0ZRUBxmcRm5DsiKxm56BJ9zNQgTZ36Eo8QDlygoI2CzdDswKKmoTcwgXthMIfSDenMk6QVB0C9RWBGw7N2PkKMgwq+rQNKYQB6UzmDH9qY6VZymisCeBNOBRuVBMdQlkdiYsFad525TllFUfSRgr9UAW6fenqh8fv6sFEe6pBoZEbRichX4YRMFJcRAt8qWMAyzWNzf0tAC8gSEQnUlyJa0soNKFBbexYbj2ZB3ET7fI192DsggClglWyulcS5U/cyVM4r1EAMUAAAgAElEQVSYYP9sJFzGp8BO43EEhXZvdan0d3Qq0onB2xNQtRQWMJh7ShwhqD7yK5Xd0gx2Bk9aisQFBUYfslqoomEbj4L59kbEZIZXCuquBVj38Fg3dokXKYLAE0vv7KYiohq1tFGQWAmsswIKEBIbQ92wIIKtWbD7lpRCCTQBhQPtt/w2vxQ6kf9hpv99FYijq2o8NpIi9VD9UDUiyA0g0xQByUGLLhIbs0GGiUIfgyB7kh/jNy6qnJIyD/JJKhGS7gXBFsyXOIrC+dYmqd/KTIY0iUG1THi4RxIgYUh+8Po5WEOdIFHToBQslflW2VQHJU2+7054/Nzc+vE3z41xg5/cSj8DMqZtd6PMuNI/FmGQQ3Q0s8bqpMQ/o9RG0+N14qkol7PXjgLzgJQ4CqUItmWzsyiwwk4M4hKloFFWXL5Z3HMkQlbf/ooLNGGnQxVybLY1pgmkORhUdgFfW6lGFci29jY5+ukXcvnS8WWHQGcrQFi4p8KDlNoI37uM4j6F6kMmAPbc1yjIugsdw0qtwULUcgrNzPe4BflSAL93SIQs1PhaEL9bmmz4XVBVnxMJwuJ+V+cVkK6wp0OjiuIf3z0h//KnM/ND8BiSH2yRcFhOnfpY9u8/ogiQXBqtwahA6B+LyGnYMPXid9wwfvbL8L
NvfO0GSaReL+qW/fidk4vNEwuXxL0DeUkTUJiwKaUE/lFNYbTM+dNKqrfvlrS378sJH/ZhXFs+voU58ydxofl3Dkyo+T8DS69cCaihoW7Yj11QU2V+yf2wH4O659r182o+uSo1UvibdhpKrzT2UHxR9bVA4+tfyJC43VDkLIOAonrIuLbHzXN/Pxbp/7R5QHzIcPKacldw0J6NCh+qP/7N958VBnevp7YAX7Bmpj8Gi9BTp07iZ6VGWavd754lUcqfI9rwjQzDXhMbDNhoX1dSUrls4iwXEBjizXt9dLQfv3fS74mM8YowLn/GcyVKFxqP5B3vXxJzzJcx1mSxWKEyKlV2hWbz8i35FhprrTzHvzm1NZuQ87JbPj9xSvbu2yN1dXeIy7Uyz7U2j1zJD8P2yiA/jjzZJnVldywT19q69Hw0AhoBjYBGQCOgEVhdBJIzs9KNz5DHLvaivpT+jM/Nn7XlLjnQVr7qBIhWgKzu/aFH35gILE2A4IOsaezOjl+31S+znhAK8rAbguKjGDXI2AxUHLC/ckGJYUfIdJlUz6NJxQUNoFyOMqljIS5xp0hUVlEtialRpR5J9KOAjeOKE3651DEA+4ACqUnWibXRLa5pFNWDnVILEmIyEpcRhO7eGBiTnSRlBlGAQgW9tnRcriEfItIVEW99evfzrXOD0tS2Vcq9yKRAQPYgdhebaJQ+liYuCmHTNTEElQGe94J6IZFgd5fLGEiSG7S4coDo8YFoAWlhcSN8HQV7ZmCwKcsk7Ox3YYc/H7dv3q/UEFYQICrwA43kB8mNsuJyRTSweUIJudTbJ3UNO6RkCwmBdCNBwHFNIbv0Dl8QMwrnTmdCqQ9CULyw2WD3k4CFVAJh4lNCvKbEaUsq8kJKmlV2iW0Ctlaw+BqeuiWXzvfKTBA7tBMlUFtUwP4JhQ7CDyXJGMLFy5CjEDVVSd6sF3ZQA+KvL8YanSpI/PrYachn0nMbnwjLIObNlY1AzcICBnNRIrHbMopCbWoWJBaKb5eGO6S8JY19EKoTtpnJWRn3hSWOghCJFqo82PLr3DKCY6Yv3FCh7CQ4mP8SCAHjqagKm3dBAcKweTZ+n18Xk8k8O0gH5HeYHXLr7GVYrI1AXeOUsioQKKl0+DuPTY11SYiKGZTNS11VUmAuR0FpSE4PpItLFQiX98YcSoUzUYp1gzii+qM8glB7a74iMApA+nAd8VHkZgwWqLyUcXeTFOIeKIlGpKi+CQH0sB9TBJYDqpiYIj82N1XJzABUCbBMG7fAogjPr4Xmdlrk8I5qaW+8u9Dpsltgf3WnqP1V58qd2VU1NXL40D55+91TUt+1RZoa23MmJbIVIJyPt8gqY5ZaKHbwKwuWaKplfKWqiG04LyQVcy/n+qUeBF6gnhk2izcqQEgK3q+QmX12fr5JXCh62x0OsYK0ZJGTzfhqHG+C+o3NgmyKwkLrvJVSdn/Z37NwWoBzd5dVKIUKW9iK8bK+ku6iVZINKiezObfrbOwCZzHU7fHNkyAcY7H5mxF4X4gC53KaHb+PtzWWqWC69G9WEOH3mX+ufRObQosNc/cD++4lsWfBd7lz5/WtLnfOY7/Y3LbVI8Ad2C+nGdd2mxfXc+7aLnY++2eY33LbxGRAKXyaW8plx84d6263+lpWgNy6dUv6+seksfGZ+xKOvM59/d1y5szHyAA6LsNDvciQGsLPaREUoXapQUF99+5nZM+eZ+/bT67XnuMFx0fl3NlP5fTpT+Q2CDCOx1ZUVC5l5dWy54lnZeeuww+U58P+w1BzMnvnPJRFHZ1XYQmKTQqJ9PsBrqvcXyuNDa2yGZZgJIeWayuY61pX4zgS1/VQtl6/cVouXbyE61ers0DucyGmpqbkjd+8IX/63b/IN/bVyOsvPYtMsoV/l2nlx32A1C9pBDQCGgGNgEZAI7AgAkU2i1T48DnYcsfvqrLYAReM5X02W7Dzr/ikVoB8RQD16RqBB0Agp5982k3lOxBHPmGS/MGbKgDd7oDCAERDKjYhnpGkpGrqxRVGkdyJUFgoQEpiKD6DOKASQ2VAQHIwYS6TGYR9kxhgc86AwOgwS4FzVqaiRbCgguc+duE3OPeKKRxQu+kLwgjatpulyVmDoFcqDMYliaLQvid8KoCbrbzaqiyeZpKFyFywiL+kRez4IOp9phq7Jz0qmDsajUsqH4XqoT5oEApVDkR4Mg9jxpWlEe217FE7Mk1CilhQc8Rm2hJvpXgQzB0AaTyjlHNQWIDwYB7KpANFU4xJYoB2S2zMEgmCyOD5JH8sxcVCAsECGyjaY/lg33Tgtb+CgqZbkSrMvmCBfRhB52U+7sKPASksHLVSkh8MPPfD2stmcyryxZfXqXJVAr1QrqA4SfKDxAIL9fy6H2QAW0++W8q/vV1KYKnFc5WaZHxCSmHdxO8nBi8gIN4pBbVVUpQckCLkqYTH7cpKrAOKmrgN1xKZIrR+GgXJ5IYqpABjkWwQKDpcM6XSGTujiID63XOqjGHAez2QJmQwB1pUjZvSZc3S/DrYxYQl1Y1iT9+AOFG0pY3YDPI/YsDzJp7zTATFidD5wEwBFCx2iYN0iEajSqUxEcEFwL1DJQoeYE4FijCJDwdlwo/8FqhbwknstGcdfC7cPd+DYo+/TmaH42KynpZkrFQRG0lPmQwiu4aYzcI6ixZicbA9pQiAJ4lFfPiPVmelxYUSw3WORruVOw9c2YTkyeamH8iF05/K6CSsx3AMlSKtUFJMpG9JKaiwSy2OZw5JIUgWE3ber3aj32VliUPKoKrJbKaCPOWFuVKNO7OpAjlwYL/c6rgln//5XfGANCsrpUZr6ZatAKF10Nebtyx9Io5gQT1/mWoW7sp/Af8YJp7LubnujGZfzMT46b/7P6Sv9+aS87eAJGlsaBOSDgYBcb+TOA8W7qsq65TqhY1jch2ZjzPXxHNYpMxlDcT9xZd+JJs370aRNK0Cud98OH9ani2HJGJo/VP1O1S3Bv7G/LPXkMucjfnx2E2b2uW//av/PSfseV5r61612z7Xxh3fuyu3yWxFOoh+oXsn+57KdQ3GtWW2QH0GNtlzM/o3mWbxO2sKhebsIxb+ngqAUyc/ADF0W77+tR/id1puP5sL97Y6z65lBUhP9238zORLdXXLouBQJUEFzlu//5n8+bO3VE4QSUQfNiBEo6MyORmU8+c+lcuXP5NAYECOHPnhVyZBRkYH5N13fi5Hj/5GqNi6M15Uent7cE98JBfOHZWXXv6xvPyNHy+LBOF6BgZvy6fH3pCPP35L2colkMNjjEEgAsEelclz/PNCYFMrhw9/V1742g+WrdpbFNQ18EJJcRkULhXIfrmKtb2w7ojFRwVhrsoPzuf05Vvztlda+fGorpAeRyOgEdAIaAQ0AusbAQvCztuaSuSnjm0ynd6HqBZU5DDdE4y+GivVCpDVQF2PudERWLIqyyKPx4riCJQOw1O9crEjLBPIAWlsapLwmW7xbXaIvbYSRIcdH9hRXM6HPROUHfX+bdhNWCbdkV6ZunlN+qD0GEBpn0qR4T5YHIGMq
G5uQuHFr3Y8O4ugdrC5lYLEW4AqDogE2kFFYfOUMFcoOydaDE3EBrHTPm0PQ1sohlNXNDBfo1uGAykpq89H4R3Ffniuw51IQoEe7LBEMb4UQeYuh8S6b0CN4pDw9fSuRx+KZFaHF6qKpAwNY+flnAk9VRQkbhzlXTI+J4CxFW8RS2BCes0oxheEoJ9wI3Tbg+yMbjEhnD0041Kh13EUwvNAMk9FJuXqGAgLECFuj0kpM0wgXZKuXikGocAQcSpEZpBB0jkBhYjy/0IN347Y+OoaNZOZeJfYUbR3O6ZgPXVRbrO4P5evQTsqr8oOgZJBjoFcmBAnFAkBkB8WbI+tKfRI+56DUG9MSax/QF0PaxJh8wNDMulCOD0IhRqEp6f3ZoKQ8kwpm6e8PNhqzW2IZ6YGiaFkibE/O23vNIJQqYl4sVTX2mQEc6LqwgQFEDM7SPSQ8Om9lg5VV2oZ2Ifx+TjIm8bWKvW6r7ZJUu4hqGPCUgXFhC06IyU7ds+rSBi+7s63KSUIDpI8FXwOCqyXuSiDYgNpMYWxGTjPTBlalNGKLIKMGaS0kCuDquOsWCLI/wBhRPLJNGGWSlhYUZMxFL8tEeTYPFvkksuw3YrkmWSzu1tqGqoVwULyosxnlZ5+hCn3pBU8+7DOPKhfBkZHpURKZbLOhVycEYl2XpZhEmlosyA8GHpPJU0KYwgCtNdCo/flpc5R6RpMW54Yc6r3u6StoUQcS+w0X+4aynA9jhw5Iv/w//1STpx4D4XW7+cUiJ6tAOHvoExbIhbw79dyLTIbfRjHL2ablSthkD0n9st5P3ngCEiHI/i5SvvrL/aV5xuvZfe12PcLzT1zHQutKVd8eO7mlh3q3xKQq+lx7my5HJu5nuw1ZM85+/vMc+/3mFZfuWK/XNyz52/MMfteedC5s//M65TZT+YYfH6pn4dsjFiovnjxC+nsuiRHvnYIGRU71uUu9fv/Fshe9aP7nru5xrDxgI1WTws13m9U4Jw4/jaCs38FBdSUPP3M63LwqZeUaozt2tXTiqi4dvUySMVfS1X1Jtm754WclXTZ48ZgW8rfw3949+eKbNm3/0U5eOhlKZ0jpknS/ulPv5aLF06oYyoq6uW5576dM6FJ8uPtt38m7779C0XeNDQ2Y4yXlCWUDbacbFF4nVIdwbwgrmsMf0fZ7ke2GL9XMteznN8xX/X8bBzv9z3nZbe7QFT58L7zAgI3Z+53+IZ9LVfyYyHbq9ba9WXVt2Evsl64RkAjoBHQCGgEVhkBEgylbpsUu+51KMhf6A3iI56vVoA8YsD1cBoBILAkAWKgFBlNW6fsOdiqnioYhz0UVCHJsnFV4CdZ4HShwI8d2FWWaknmucRauknakT8xBAKkAnZZEzabsrmiqgBMhgrdHusYl3KoJDgRp61c3CZkdTiDwhwKBnSHBwclD0RI39UedUxNjV8RI6nOSbE3Nqiche6JJIrhWAwyP8RXrOymurqgNoA6hcHnNz+7hlr4rDz99GHxwvaq2F8ipsY2uT0wjB2JCHVHPkgYyg+2JKyhUgUp8SAwOgwSIzJUr54neRENjclQAB/YwV146prEnTAhVwJLwRipGrdU0h7MnZR4JKlUIxOOpAz0pUmfCfj1eMqw8x4TDcchlYBSwQRigQ3iARkvMqtjiZMTTw+MXVF9UHnBbIsPL98GcXFVGlr2SirJwO0kgsuD+HdC4ciMklQ0IAHstJ+dLeMUlWLkYnevDID8kLwyhPQGJQACiIoH022znDv2uaQQ/N2Ef53BPtiPwZ8baovL6Jf48zoFEDRdgTyHfGSisKUmhsSD63R29Jb42hyKs+k5Pihe5KU0bisXa4Ae307pw3U78acPldUXM0iqypwCky/VBxUv7sIRkB/OObJApO2pp9RreQhEv9HDaxFSapO4rQ7KFCp2SmUcZAXJmymoYtjKirZKtLJagghHH7NPSnEAZBLJFmWikw6Hn4bax4LQd9+cKxttzqh+Ka/fA8VGOXBHXgpsv0rzn5Rqj0MpRC52jyDOJi6WuFXOfzlXZiuuknFct2t9UZm6fVrqYZdVvW8vbLzGZBRjljLTJJRSNlrXoO5xWoMqf6Q/ZQcRsrxMCjX5h9DC0YQcvzQk753quqv3p9urpQ7SlpUmQFgPb25ulheeOyi//dfP5fPP35Mnn0znW9yvgJWtAMmGItcCfvZ5D/r9Vx3PWOtSXzm/++HyoPP/KuctZz7LOfarzGk55y6Fefbry+l7oWO/6r2yUJ/Zz2WPkf4+NzqABdlbHRfli5PvyY7tFfLc88+u2x3qa1UBkkgg1weZM9ZCh7KoW6jxvmP+xvHjHyqygOTHj370H5RVoMmUDktP20O5QBL8n0pNQUKEz9Eai+H1bHy8kOLKyODgXGh7Vwh1FvM4mDFE8mP7jkPyF3/x17JlC8iJubyhaPSgKtwzs4jH0JJrz94XcsrECYWCIHPekY8//I1aD8mV17/9E6WAo8LO2OHGD3o7dpJ0e1r+6Zf/USlcMskWCxQwmb9HSNpMRaegVEogRwR/ky0WtWGGSj+uKftngcomYsM1M2OEbWpqEgRTdP58m9V2F26ZWBHPxfJPSDTS/i+z/4UITl4/45okk2lrxoXugY363IPaXlH5ocmPjXrX6HVrBDQCGgGNgEZg+QjwfedgICJXuwIyjdoeW6GZ79PMsrXeK8XMMF7FphUgqwi+HnrDIrDwp/MsOHqHYWuED/NmhJSnomlSYRK5G1N9PVIu28SLYPS+Pu7ku2OTEgwOy768V8UbB0kwjA+lMyHsmEdD3gfVELNlPsmHV3QchIBhqRXqQpE/BsUJdvo3IUuCCoLx/AIJhyJQZiBZIwSbpwlYaqHgXOD2qp38ocIeiaMIzcYd/mxFt6GSgKc8W329UzYVHJAgcjS8eVCWgHSgEoFsSg3O7zKDBBk9JSR4lD3X8IT4JvB60xaV1zEeTuc3lLXCbmk0hPnB8svSJLYijAU1CG26ErBrKiqrUQQJi+Re5J0k8aGdChAqXkq8RYa4Q/wouPuSyLpAMDoJlAlzUmV/TISSKgtFiUBAJpFUyp8dRfi8H7ZQpVI4i5Dy4kqoYWakqKQKhA0CypEFcgP+2hyDKp0wSJb62Ts7TgcRqpqaTOeIMC5lAgqd2wh0N4+5EPKe3mUaAbnARvKDSg42Yl4NkimZQuZKf4+k+m7DLit9fHN+MZQWU9JqTasdaO/VvrVEha4PUqEDxY4znlSkjK9iS5r8qKlTQfXT4zdkSKkzQE7U7xIkREsIeS40X7rZDF4JFl4pFOjz+RwCz2E0BkuxIfE31CrLqlkQOn23u9V90FhVI9fzSW/gPNicFabSBaESS4X0IfyYQfY7cX0FhImnsFxgjqXUIcxnYbh9YPa6ItJIsEVB9pDIKoMSKBK/KZFeL2zVCsUE8gIiGtX8+bPqukVNUCnh+y7cY13nTgtzZKhAQcqN+hpGuH2y1Dt331wUzscKy6y11qZxjdgKLSahMuRhNVphHXr6EO69cfnjHz9XO4F37jh4XyXIvQqQFOyR7lh0sUCW
qTZYaANHdh+LrW8psiVzrMw+cu1/sXFX6vml5r/QOGtl7pwb58/55LqOtTL35c7buA7Lmf9KY2KQH7Skq6vzyKuvviLF2HywXltulM/aXJ2hDGDGBxWczz7zLRWgbZAfnDUVTLRl81c0KNuoSShGWMgfHRtW2S18D7B9+0FFYrAPo7FvWmtlH8PXac9EcmLv3mfmzzMIBxIKtLCrq9+mCJDJ0DjIA2SqgcDIJhqyUe3puSnHjr2DTJFuad+2X5EfTyC7xCA0jDHYTzHsNvk3gGoQNhIK0wh+n5nh36T0+xESGUPDA3dlo/A4EhRuzGf7tn2y+4kXpLqqfp5s4Lo7u66qdZP84OvjeB9Ksu/K5TNqHCNX5eChb8znj4wFhpQyJjA2qHJJFvv7xGyTS5dPKiKK6phcyaFsrDby91R+vP32Wyrz47n2kkUzP7TyYyPfJXrtGgGNgEZAI6ARWBkEoqh3kPz4+z9cQm3mTt5nA9wvfvLqtlUnQLQCZGWus+5FI7AcBHIiQDwowHPXFoPMg9GkmGFHNNw3IgU+jxTh+QH4PUVCUDWgeb1lUmUvUUX5ri+Pyg1me9hCkhcA60pZAhqL+/A0UjvzxI9i+HCRFOA8u92uxsnDB9zIZLqwHIYyQuJhMSfyQAY4ZHgmLuNK8YD88+lCZXPFwjkbMx24Cz+EHIcdeKEQp86wqB4ireBRJAgL1r1XeT6LvoPiD0IdgVZW7FfKiBLriHR1z8goitnMwCCpQsWCytmAKsETQ8x5JATLLocyNqJKwmuyymSyT0LIoRCoRny2dI5HM8I+p+wIVgdLwmQOkiN+aVDZGAKVSoBKGLR0Rop6KN5Zi7gsJeJF2DNtu2yRoHSN9okPpIfd3gBP7X6oHEaQIRKSsC2sSBOW/mHgpM7vniqCOgS2TMgPYVF/NtYHSyjaVw0ouzASJZaSUkHuNdbUIOOwHevos4L4mYWSIc2Mm5DvMjvtk6lEVBE8fQOD4kD4vL/EK1dAoKStyaZBdqRVM2UFuFYgspAxpRQ0YVuR1NHiC85pVkcFrkNcqVji+ZUgB8ZlNIjzQKp4JxEuj2D5PrNdqoHxKALTZzCHsLlG5Vura91/TnbBhouqlrF8i8prSQTscnMmKKl+WH4hS4TZKeFe3Eu4ZpOmOnHhGM6RuSddIGRuHftEWuvrJYkw+tKEHbRKXClFogEQFSB7PHjsgu3YbH+XTCPTo3/kMoo+z0gEmTMO4EoiiURVEte32DShHrsxxu3AoAzfuKUIrvy+PhBz+bg3R8UJAoQkHnNpwp4RkSu4zTdtWfWd1m78TBzeWSXNVdQc3WnFHip/Fg4evevAB/zG5XLJK6+8qs7+6ONj6iuLTNx5nLnb1+h+ZiZfsMlXNRt+fnTTCGgElkYgD7lNAlI9u5EgTCbTyo+PPvq1Ij/+8kffUwHN67mtVQVILphSOUDC47/58f+mDncVuZWiIfv3oRmKB6XygDLCAjKZ6geqC65dPy9fHP+jChl34O89lSOGGoEF/ffe+4VSY5T7GxVJQiVKOf5W/ugv/1elpqAKguNlNoPkKMRrRqPiYinyg2RFBxQqXfjncDqVyo9KFc4zez3sl8+RsGGwe1lZNdZmkZKSSkVa8DWDyPj9v/4DMkneUzlgxSV4T4kAdZIYQ1e/kHNnPpHzF76Q17/1E2lv36fGSqWgiu3tgI3XL9X0+/H3vK+vG9lnIwpDtqHBDrly5aTcuHEeCpj/SQ6BCKGyhGoX4rlj1zNSXl6HLKbN98x9AJtNfvvmz+TqlS+Q7fF9FRSvW+4I5Gp7xR4zA8+18iN3jPWRGgGNgEZAI6AR0AjcQaAA2Zgh5ACT/BgdjyL4PL2hMojv44nVV+lqBYi+WzUCjx6Be6slC8yhCPket2/fFjsyIswoUhdg517UbhVfoVWRFtsRHDsBywGPDQXgWL/K6LBvbpGua9cliuJ5HERJGXbkRwdnxO2PKlsqNhcyIwqwqy6/yoUP41ZxoC+O5YWdUGAwpFQZo7f7JEF5GorrcRATFkta7YHIV/FCGeIOmOZDyS2pfimpOQCaohe/5CbxIR8B1gjlZgvnDcrNyRHZ5C6VXVUV6rlOhG7Pgiwp8bjwgbdRwihej07PSpG/WlLIqjAaszZG5or9rciouHijUyxQkUzgA7sfeSK0Tap1tIHYCKuQbhIxTVUoVOQ1KDsuqj2o/OgFR0RrJeM5qkhsIE7cCTAF4AsUYQELLR47UWSVZFGhDKHokUyCVMBuT7cL9k6zJoWNQXhQTeP2+CQ4HpeCIdh3ua5BGVKhskZ4THEkTwah1CkEaeLf9G+ldUebWta1a5egBoFy5OZF6ei4hmuBAPnyEqVcwdZMSeQlZTgfyht868D1dXuqUGRAgQ2ExgQol6FAtyJ2OA6JirxkDMQLgq4taQWKGeFS165fldLUkAzh2pMkoSpDYgjmQKPqRxIgC2rapA7kElUwUyAU3FC80P6KfbKZYy45NzYlwwOfqvuA5Bob7c3YSB6ZolDcgMBgM9mSSkUTh7HYUNdNGWaIfFWtIk5SyJ0ZjyH0HvkyJG/CiSmsrUziIK0SUzEJgZRhK7JaJHTrospG8bXuVMeOe0NSZ6pUhM4Ecj3iPre4QIQQgzB2qk7OkR8eFGq8Mx4QISncWxEoTCLSD1zWQiu0FEg78nJaa9NqHs6JAehsKxmCvtBaSYIcOfKieokkCHf/0gplsd3FyUTaco/Hm4FzYoHC7kLj6Oc0AhsWAfyMZP7cGDhEoxEUfZG7gGIyyY8f/ptvKfJjvb/pXs8KEF4bEsCG/RS/v6OSSF85XreuziuqaG9DAb+ysklZPxWB8HjywIty6+aXighobGhVBALzhkhGnD1zVGVskDg4cOB5KDpaFUHARvWF0bLHiyOwvLf3uvTevqQIF/ZLomSpFsJ7ky5kYI1iY8rm1q1KRcEcjKWaE/lprTiWxA1JOmM+JHBI1L333i9VIPy3v/Pfy5NPvSwevC+MYPMJFR7vvPMPCFJ/F8SRR0qhvq2prp8fjrkitOFia9r0hFKjGGH0Z858DBLjb5Wi5vTpT6StfT/eV7mx1hb55OM31dq5FpJTmfZWxJXXorvrguq3srIef7vWr3pqHqxH9CBX5Qenkx14rm2vHtFF0sNoBDQCGj7TYocAACAASURBVAGNgEbgMUOAdY66Crcc2VMPFfUdwsOHWlCJ++6NQKuxdK0AWQ3U9ZgbHYGcCBAW5RlWHh4anyuUV4mtK6J2I7JFYM0QjyckjCwGUyIo4/yMDeco7oyXRuzkT41JBMVnZQWFHJBEf6/44Y1cDAWCHZkeXbA/GBmdks0gQMxQUwSxY29oaBTZDvxFRauldLHbApKkKGxVdlyJvrBMYl4RZpGEutU8qGg4OT4q4TEUsDE+C9kp87TU+GAjhdc31SDLA6qOKAoFhh1SxOtDkHedzCIgfDIF5QYsmgpcYRWozsK2VBShGJ9K54sgByLQm7aJ4uvFJlNaHYACeR4K6FReODwWpRgZGg0gR2MWKgHYNk11SRJqEE9qRs6cHlAEkc9
fAQVJXJEjMtktVyZDUmNuFKcb5yuLriqZgq92pWtSgsl8HJeUqDsfz4GRKLdJvS1duB6ynlc5JVTnFCCPxYUrStRUlgnsvKIVFTI7WSA9sHPyx3uhkPFIAYolRd4mqQZ+k8DZMmGSYB4UPjgvlEoXnrvOvCvFze3S5KlBIHq+zCagZLB5xAHCx1voE4tvQrr7rykyq6qkRalTvF5kwcCeKpkP0qN3WhFiCRhGMa8kOATyjBcJZAPVQMVNHskDuWG0YhMe439HKggrLxPIIuCHIjlD5WOYX1kFsj7y4mJDlsn0dEyGg73pftBBsMQneSOzUgdlQ7F3k9opGwwEEGR+FY/tYnH4FPlhqcG8cTxxTYynZCwYlMl8hK+DyDLDsz0JcswB9Ys9hLXOtcDVs5iUHUHn+Sqbpg9EDUmfoaFr8xkzA6mEWPCcgPxJlnrEWt1MYZNS4UzBKisevtPffMer8GAG9/HYZEwiyALJbA6bWYWDmQrukH4PY3q026ESxIxMls/+fBQYdsr+A6/cY/3CsakCSaWsyh4pFi1UxVrDPiiZzMPvHtzfc/ZJxvcb9SvxMtbOx4vhknncWsVqPcwxG7tHMef7jZH9GovJ/Mc31SOjA6ogzsBzZn7Q9upxID+45vRfQD5any1bHcG8C+Z0sNhO+0yqKj54/9fIKRuTZ579tmzevGs+94LkcU/Pd+XNN/6zCkon6UDLKVpAffbnP4DI6FHn8PcrSWajZY7JcYJQN5JU4Hh9fbfkww9BAuDcXbueVcqRXIgM2mTRlov5GKWljeLzlecU1E5liUEyGPOi+qODGTUgcBIgZJ59/rvzAen8G8Dj3AiJ54aQf/zH/0spRHZj3RV+vLnLaHw/yrl865vMIdk+b8VlsRwB+XFWOjtuKBUI1SHFUM/UN7SDJKlVa+/puabUybQgY+PPEkke/gzxWjQ0IIsC1mQLZa/cNQn9jUKAqm7aXr31z7+Qb+yrWdT2isdq8kPfNBoBjYBGQCOgEdAIrBQCrG1w82dz9Zyn+VzHVIZYTA+37pHLGtb7ZrRc1qiP0QisNQRyIkBo6RSeGIGJFKwLXOkPhQJFRcrsxs78XrEWlCpiZHBqXBLFoBpQEGbjbv2EG4+DyKvA7n1aHyXw2py7jcpuGEfQtDlIZcmYdOBfDciIhM8uuyt3oxCQkL6ZCSkIwloKbdyZksnBARTakdWAD+0F43OFXHP6A34VsybAvFD5UJQwiuuTyqqIigGOb67Mk8SxWyBlJpVdF0PHL41OSv/kLTVGEAXuMHItmMnRN9YHqyoQK7TnYnYJLKwuX7w6f55tJqpUDeOwRjJBTRCPp+A1fVsV7mnpFeo9IUMeeGjj8RTkHcUgYmwo6IZRwE8MD0ERM5xWENDmCw5OY5ZRsUaBMogDN9aRKKJShfkSeApqERbu7fhQzn+zwYtC8qM8tl2lqOd5scszNKDIkzxvu5R77TIEXHlOPc4voVICj8eHz6nr6MLGzgTyPNjcVpPKE0kWELOAFOXHJYJryq9DmOdox1WZQXZDWyusu9CoiBhgXktZSpqhgLAzRBUkQjAYk2jCDCIJyocbp8TZgPknfcpmygz7KZJbnjDY9iaQAyBOZjHeeOw2yKPN4kBAu4kZGuh/Ej7gvA94vCIPaOEF4UhVrUfMUFpEce1IiKS8sKMCoVYXTsgsii5k3VLDvdKRFo9IJM8s1aWwzII6pW/0urgKSiSIgHq2Wl+ZmECOjE+nQH6MQ/3RJXXtu0GibVZh8bz3prADl60c9ym/j0VAiOH+4P1FMsY7p2QyrM54LBUsLPBbnJOYO6zjinBvYBwTyLLVbiQ+Pjl7W05dTZN4xny2N5XKa081itv58GywjLGoBPnGKy9LbV0Ndvi+p6xaGIjLsFw3lEaZu25Z7GKxmYUy4zH74fczM3yOJEj6dX7PQlX2V2bQ8vj7feWbD76e61djfI53v36XGnelXs/GBHup78HFyOJdqTFXoh9eK86VZJeBpbGWha7HWsPdwGA5czbusfvhtxQuC+GQiSN/LkLIb2Auw4ULxyQ1Oyavf/OgyuJxOmHXh/v9cWjrXQGSeQ2M3I4//uG/zts2MYzcjg0HR478UF566UfSACUHrz0b1R6Hn/m29PV3KeumDz98A3lONjl//jOlCmGR/vnnv6MyMoxzMsfjPdKLv3lHP3kT6qAzigjgeGwkTngus0UWCvnO7IePp6bwdxgbT9hon2W3322xmH38/b4n8dDVeVEREeXlldLSvBvvm8rn/wbwXBI6JHxIWJDIuH7jtLLTMmyueAytuLZs2SW1tZvusuKi1VhJCf5GQ+GSSEBlDGKDjQoRqkXYH23FSOhQoULsSCQysJ45ImxtbXukAgoQ3ZZGgOTHO2+/K//p//5bKVe3bo18evz0gidStf3FxQ4ZD0VF214tCJF+UiOgEdAIaAQ0AhqBZSDA93AMP5/IyP/g6XS+oP23Fe4Yq9n4WYY1oldefVEGBlDT000joBFYNgItzQ3LOienqmygF+HeA1elttIHz+a2tHoCGQeCNIXINKwZUBinSoQt5U2TFeobuBD4YXNFIqRvalRODXVJbcqnCIGhzojkjQVwUGB+J2fIGZKu6R5xTZZJXyDt/W9Cvz53hSqYWqDwOH/8U+k885FSdwzCRonZDo2796OAD5sqWDAEQVLM4INrPkgTel1PWFCoRq6G27tDZkvGJc8cQLg4fKdRwOauR5VHgWbC7v7JZKEiJDyWMmhO+tXzbAx057++3jQZokLHK0DU4LVwgr84UwjbTO/yZ16Kr3EzwkS3SkfPhFwY/Ej1kR9HwDmIg2keP9YvQyBeHI3lsGzCJUDGSUUVbIkGkGECEih/DOHcXqgKQGYMQU3CFnWMKXsqkh8kNWhMQfKDJEir/SDWA3IJ9lpxQMrXM0vc1WWlUldZJ+O9tIUoBv7w2naMSLI/Buuucgniw31wLAo1DkgaixWqhQlpcOD6DkSlZ6QXOIVktnAKio90IDzzLUh+sNGCaqbjA3GW78YcvWL2o4gO6BgEvgdB92OwFKPVmMOKbI+wCZZQIFRmMVcQY1XgQiZQtHGCfAGEKIDXKSssChLjYQQj4wNzvrVIEtZ+dZ1KCipxLMib5LRS8oTLm8QJTNymIoG9o1JbdBeMS8eXg1LWXC8z+MQ9lT8sxbDjwCWT7uF+ccMWhLkh880Ocs4OdUfQoazAQufPqWDgbdtewL0SlngMtmZzZEiYt+ScM1MVrDeoliFJxNZcs0t97UUWSEfnTWDsRgFlGrthPWmCaH7A1XsQwxuAzr6QHD0HYm+uGV6YX99bRx7tkTTa5u3Zs1eqqqrkT+/9Sc6df1cFy7KolU2EGLt+ObHsQl729wtN3jjmfl9JrvD1XL9mjmPMz+j/UX/PcVkT5dwNjDK/N+aaidWjnmP2eAvNJfv6LnQ9MnE31m3oAFYD/3QtOl2QNuaffQ9lz9l4nc8vBxfjmmZil90Hi9oMar4JW8Nr107KBGyAWjd7VQG9ubl51TOIMrFYicd3kF+J3la/jwCsny5dOq
XsmahgYKOlVBUtHLEBIz//7rdrJDdIVPR0X5cL547KJP7OjozALgubAl559cfzORyLrYzvf86c+VTOn/t0frwabCIpLsZ7GKXiyOnt4Xz3JpNFWZkyN+RBGu9xhrz3wbKT6g/mlzAjJDMYnv3y54bzo8Lj2tXLMgbVDM/LJEBoGcag8sznjDkVFqbnNxVJb3Dg8yVY82YoRUgm0QZraKh7XpmYSMSV/RUzTmjJtXnzbmVDptvSCIQQev7Znz/Fxh4osVu8cu1mn1yTO+8/MnvoHhmX3oGY/HffeQI2nXdUS0uPoo/QCGgENAIaAY2ARkAjcC8CKXw2utQ5Kh992SdTsTsOGMUem7y0v05aauY2dt976iN5hp9lKioq5W/+5m8eyXh6EI3A44qAGXEcubYlP+FyZ2K4cwAF5GmVc1AUmE73jZ39tDZ6cvdmFU49eO0acsbzFHnAsG+28k2blZLBlGqRYOenUlt6J3uAr/cEJsUSHUd2NVQkLpASMwXiBjEgHuRo4MtAX0DMnTFkdEBREA5hlx6K44UpZYvEfAU77IqCsFzquHBZxhMpkB9xfGiuEW9pCpkUJqUgmIWaoG/4plRhvKJK5HxEA4qkccNmyeXA7kCMTXUBZoEj8I+p30NR2RQphKXSXjAjAbl1Lr0zctZXICWcn9essk6oYkhBoZKPIPEtNduULUM/PpAPXb+glAvltYdks2ev9MA6Yaj3AqzB/Gp+lvpNUKQMQo1AnMpQaE8Hw09BHTManpESM0gSkDmCTZW0VypO+MXqHlUZHabRaQnNJKBmaMbORjPsryblS3ygbHSHlPVWGJhQthIaTudZkKCwuaG0gCXVSCosoUtDsPyyiTkyIoUU50i1uEfGZKCT13VI6qdnJFg0LoFAOqtjR9MTKhw+5ptV+SUN3irpRYiUHyqZ8OCgVEGNE5uMSqEnJrF8EloIbodapc7RLbO4tsWwzUrNwqoMYfNhKEWc1WbZJLCKQmYHVTEkp9hIjkh5Uop8NRINjYkzRdszqH9AbHn9VeKE5ZjJYleWXZUoPpQWFii1CPugdVYxurHtqJVEV0wan/DDtapGKS/yYa9WYC+FTZdLynF/srF/u8UhSX7fB7JsNiZFO7eIN458Fax7aPC2DI5cU6Hvk8GbEgNvZEGOB1UyZhBZJc4ysZlwXyObIjhXQxkcBSmF+yHP7xQPrKacMyBocD9M4DqzPZd8Tn1dzWbFfdWKDIAXY/DBBKnDZsM91AxlDV97lI1FLBIg3/nudxRZePLkKRBQnygihIWrqqomtcuWnvdGIS67APgo56vHSiPAoGE2fS1W944wrgNnwWBoBjQzAJq2clR8VFUWy4tHnpFdu3eBjPXlrPowvGj587nW2+OkAMmHFJ8qhO98599BvRNQfzuoRLiOwO/f/fb/VaTAa9/8q7vCzmnBxLDxr3/9e/LrX/0n+fTYW0r5QMKLFlZUMCzWSKTx9+u3ECL+9NPfUONRTcJckXff/gUCxG/fFTC+WD/G8ww1pwUW7SkZKv6gjedOQr1EAoh9kvhZqFFlYoS1LzbmQhZV/FvCsHUSJJmNYzU2bhN/RYN0gujg3yFi64LimfZX10BKRfFz1rplnzRDrUiyR7elETDh/UURNsz88KWtsnvXNrxfXFxl+v7xS/Le55dUJp9uGgGNgEZAI6AR0AhoBL4qAsmZWRkYjcixi8wHjqruuPmzttwlB9roHrL6jZ+5CmHfrptGQCPwaBBYkgBh7sbXXv8xiizIdEDYdhyFY0s8XfV12WcQ3F0g1nAcBcsasSZRknYWSyzZLfnucikzlc2voqKyUiLc6Y9miUBdYNSDw2MI6wyrIPFCD/IlsMNuHAV8nj9Uj939UZMUI1jagdB1iUzgA6ofVgVN+BBrkrHuK+o8a02TGtvsrVDHxRNJ/HJLL42PA5NVqj8HitcRS/38nDiP0TJkYGDHInMjrAhNJ3vEHX8IElHrZdtc24NdlUwRQbHJGhZrzCkxE743Dap+2U+JzwdmOSZTTVBaDAyhuF4G1QrUCwmbVJaDlNh/QFwgIrj7nY3WAJMTKJAjtJt2VWyBnrQNFx+bUuXi9EZU/6mxPIlPI8wba6f91x68Pp2H8adGxDWdnhfPKasqFJMrbWuVnEkXKoeTaTLKhTwVctzBbekQeZt/m/iwXjbbBMbZE1NrJAYOXGsn1sBWv2ULbDRqxAxcGRQ+kxiSAmRqVJch3aNhGAUXaFF27JfAjA+vpYvqBTVmKWyok6KpbihUisSDPsLjdmmC6iRkSqsvKgsrpQAfjtmmsXuT15ZtuHAaeDaJB1gZz4kDgeMIl+8FpjGoZ9g2oZBRl+cTY31l0+m1tJZvVa9H8ngt0/cA7wUHrF9MCFuNAcfb4W7xzPrEbkXQfCPIGayLzedDsQOEGo/nfcPWUAaSDb7fxCZmwrxMsMLitZ9rcUdM3Ve8Z3jvGPc4++c15uu8fmvhDxuzPg5tr5ZdLXf/wWc4Ol9bjUZLrH379kl7ezuKTNfl3NmzcqPjFlQhF2DrAqIPtlgkRFjMssGGTbfVQ4DB9cwNYHPBh1+31UPAuBaBQD92/Q+qv1+lZVapr/dCvfYSbHq2KiXbcto0fjcOgtS2oq/iUlgEPuRMoOXMbaFj1z5Fs9CsF36OhAQVHUaYNwPJh5BBduL4Oyrwm4HgziL8LUUQeFlp5XwnzOhg4Z5WWSQgWNivr29RioZstVD2yLTRev75b6unk8kZYfg482LeeOPvVMA41RwMGK+CevR+fWWSEbTCoiXWSjSD4FisL85PkS54/2D8Xlrs2KWep/qkqrpJ2VuRAKEtmGGDNQBiiMocYktbrZJibFpJy7+W6nbDv57Ee0Iv3l/bCmzSVmmHTZtZpufUTcZj42tVqSY+NvwNowHQCGgENAIaAY3ACiPgczlka51Pov50bYfdV2NjrMOmN7OsMNS6O43AukBgSQKERZDtTz2vFsMdlyw6ZO68LIO33iY8R/byzu7RfXct3jg+u2BhPD+LPoyWh354HPu63y5Uvp6HoqnRZ+bO1ex5zqba0nYj6JdB0Gz5+ADLsTj2QuMY4/Prpp377lqfMUdjHDWX+Xmnx8qcf/Zjjm8cz8fsh/PIazus5qYeZ+zAzcb8Do5uZMwT/XTLXLdxTIu0zvfPozKv0/yJeNA8N56Bo3FsZt9wmkJDgPr8iZvm74U7zxn3Rx2Oegrzu3+7c+XT91ZL1uGZr/v5mj+9Xj6fvvc2z2OViVP2PWT0w+fr8F9mv8b9R2wMDI1p8PvM1w18Mq9P5pSNY43Xs+eRtbxH+q3yu4SHWDbZYSpgGO3qBoGRGNy1ayfsRVpQ0B3BTvYhGegfECprIpP92H07K0H6mOm2KgjEYDV37Vof1FE9ylZmz55msTu1TcmqXAwMarfRghIEdb1N9u/bL2XlZeIvLwVJXgnS8MEKiVR78ueOP4skQNZ6y/wdvtbner/50bqMjUV1w8qOhC8JEcvh16HG6EI2xd/JmdOfyaGD35Tyssr540haMPdjbHQU180hk7A9O3/hC9mGA
PMmhHsvlOGx0Hg8jsTKnr0vQAXZr0gAqkF6e6+rgPGF+jHWxHErK6G4hCpiaLBDKZGYV7KQAiMbBxIXM9isQVUGra4ym9oYcZ9G5cdylRgca6FG3L2eYqlv2Kpsrrq7LigbrAp/tQqjHxzoVBtxaNOYSzD8QmPo59IIkPAwmvE48zmNk0ZAI6AR0AhoBDQCGoGVQIBB59ubfFLu23ZXd6xvliNzWDeNgEZg4yGwJAGSCYlRVDe+8rXMQvBCRII6ZhFc5/vLKPYbhy7W12KvZx6fPc/MYnT2rtbFCtlGf5n9Zs/JGGehYxZ6jnPP7sPA5y7CYwE8FsMwE9rsdRuvqefnCJrF5mAcu9D8jDlmjnVX31kv5DLX7PMXK2Yt1hefX+jaZR6/0D2RPa5aWxbZlLmc7HEWw2e+3wWu3VLnZMH30L6l9yV9MLsG00osY6B6v0vaGqCysq6OCiRzwSy+1tXVQT2FXcd4YRIe4lTlTCO7hc1QNj00kHTHiyIwDsUaVWtUG1AtZajsFj1Bv/DQEDBBAUcFHRWBKxVsTrVna2urmjM3CKz1tvZnuDiCRl4LrcvYKvy1UoR8r8zGS2BDqHgprDON0O5QmDle6RaDDSgVG0eP/kblfuw78KLKA2EIOgmJTLWIMR5VDYkEbEVLKlWYeLaSgdkWPl+lUjuQVBkZSSsu75pY1jc8p7KyCX0i222oXy5e/Fza2kHKZShVFjqf8//88/dAuAzgvtuL/KftSo3rQr6WBUVyvs6ckuxGsoIqE4Mgcbm94lDWn0u3++WakEzZvHmXyh65cuWkssEqL69DJstZZX9VXdOmbMpoV6abRkAjoBHQCGgENAIaAY3A2kaANZgi+72bPznr9fBZZ22jq2enEVifCCyLAFmfS9SzJgJrvVi01uf3ONxF4WhCjiMD5r1TXbChSO+ELbSY5IlN5VLnd68JAsTA2bgfuJv9QXe0Pw7XbK2sIVsZtVbmpeexcghwc8B6+llbjDRfOUQeXk/T01Mo/v9R2U1RUfWDH/7P8sTuZ1D4L5xXd3D0RDKpwuwZCp7ZqNa5desybALfVETFC1/7vrz40o/kAtQgzAP5+MPfSEN9mxw+/BrIEYeyaLx0+aT80y//o8qMYUj6Sy/+8B6VBhUSzCBhwZ9KiEJLWml0PySo9CBxwHwM5pCcPfuRCgo3xjZULUYfJHZo8XX23Gfyu9/9TIWLv/zKX6pMEhI+DH0n4UM1CQkfpbDNIOS49ghyORj4TqKEoe08L5e2mAKE53IMElFbtu5SBAgzWCoqzsiNG+cVIbR9276crMVymYc+RiOgEdAIaAQ0AhoBjYBG4OEikITzy3BwSq52Maf2TnM6zNJU6ZVi99Lvcx/uDHXvGgGNwKNGQBMgjxpxPZ5GYA0gQOLDaPY1oPxYA5DoKdwHAU1Q3gcc/dKqILCe78l0GHehTIwPynWoJmpqNim1QS2+GlZQ0WhErl8/I1cun1Gh4KWljVBnlCuChNZXH374K7l65QvYNrXJoUOvgfBohT2TW7q6rqvMEJIjVVVNUFfshmrBBDLDpsiPixdOKHVII85Lqy7S+VkkJUiq0EIrEg5LA14vLa3K6dqSODh46GVlHXXt6mVF7LDt3HVYWUuR2GFj1sjEZECt67dv/kzOn/tUGhqbFVljt9mVFVZ9QzuUFrWw3+qRc+eOYY67QMYw0yQ9lakphpKfVK+Xl1emz0UWSiq1sL1V5gLupwDhcbS3amnercgfBtCz0f6K4ehUqXB+umkENAIaAY2ARkAjoBHQCKx9BBiCfu7GiPzXD67KePjOZqIK2F/99LVtIEDS2bRrfyV6hhoBjcBKIaAJkJVCUvejEVjjCLgdhXJ4Z5XUld8JcbfAd73YYxW+pptGQCOgEVgvCKxnBQhVE80o7G/bcViG3vsnRVhMx2Oy54lnFekwHY8iS+OmHDv2jiIJaC+1c+d+pVCgNdSJE+/Jnz97S1lfvfC178mmTe1KzcG8joOHvqFUC2fOfAwCpE7cIDsYZM6Q7127D6p8D77GRtKidM6qinZXn336rnrN4XSqQPA6kCr3y/8w7hWn0yP79x+RwNigvPnGf1YkC8mdPXuPyLZtB8Tl9ikCZhwh6ddvnJYvTvwBuSY3FIHx9a//UGWPWK0O1V0jskt27nwOBMd/UceVFFeoTBOSO8wMocXWn/70S3Us+9/adkDNMRcC5H4KEPbHfpgDUle/TeFAOzGqYZqbtyuFSi5YqInpphHQCGgENAIaAY2ARkAjsCYQoPOF4X6xJiakJ6ER0AisGgKaAFk16PXAGoFHi0ChpUDaG4qltdZ318BrIQT90SKhR9MIaATWOwLrWQFC7MvLKuRl2FYxzPv45+/K+yBCPvn4TWW3xMZAcyo/SH7Q4mr/gVeUXdbFi1/IB+//WgLBMTly5Ieya9ezyIHxqHNYoG/buhekwvdghTWoSBIW9EuKvy/FUI8899z3JIxMpWNHf6fsqljkN8ZjoZ/KD47HPJHnn/++uF13/61QgyzQaB9VWlKBef5ACgst8vHHbymi5c03/h9F7mSOQTsvWlxt33FIkTcHD76i8kiMxnlybOaVkICgpdfJk59IIWyumPtBlUkUmVAHnnwZx31X4ZhrW0oBwn5Kistky5ZdCpvBwV4oYZoVicOsE6pQsi29ch1bH6cR0AhoBDQCGgGNgEZAI/DoEGCNo6naI99/rkXiidT8wEU2s1SX5pYf9+hmq0fSCGgEHgUCmgB5FCjrMTQCawCBGfhgjk3GJIIskMzmwJuAYpdVmAGgm0ZAI6ARWA8IrGcFCPGlYmP79qegtnDJltbtcuXqeeRe9EDlEFLw09qqrLxaFd9JclDdYagcmFPBf4cOflMRAJkZGUqNceAbKuy8r69H5XgwA4RZIE1QV/zFD/4Xqa9vUVZXmeOZzUUIAK9VWRe7n3hB2XEtR/HAOVRX1csrr/wVMkD2ynnkkVy5cgaqj5H5NRljNDa0yp49L0hT09Z55Ydxz3FM2nb96Ef/QeFy9uwJ1YfRqM6gGoZzpO0XcWSjzVdVdaPKE2HjYz6X3RobtylCyQVyp7SsJvtlZYPFucWmIzI2NgKrsBalMmFIuiY/7oFLP6ER0AhoBDQCGgGNgEZgTSLA2samKo80VrrvmZ8OQb8HEv2ERmBDIHDvp8MNsWy9SI3AxkOAxMcnZ2/LqatDdy1+e1OpvPZUo7id2gZr490VesUagfWJwHpXgBB1FvubN21XZMMhFNsnUOiPwwqLjcRISUmlUh6w+E6CgceTNGlv36eOYYE/m6TgcbS8+u53/706hs0o3vNYvvbNb/5Enj78uiIWGCjOZgFRUlpWpTI7jPHmTl/Wl6Iir5rjli27oeK4e02ZYyw0d2Ogu3B5+jsyMnxb2YIVOb3K0qsEweeFhfa7iB+eQ4KHviO5sQAAIABJREFUpAjbQv0bpNP98DMIGOajGO2r4LEs8B7hwQyXT+P0OPwkPULg9FAaAY2ARkAjoBHQCKwLBPheJwr7qzBqINwIarRCs0m4AdQKdwzdNAIagY2F
gCZANtb11qvdwAjEEjPS2ReSo+f65lGwmNOqj6/vhVf8BsZGL10joBFYXwisdwWIgTYJC6ozaqodSkExOyczyFR1ZF4ZFuiNoPTFFAk8l8X+hSybjNfKkP1B26rMeazUHZCe491rWmw9i43J45kLQlxqqonLYkfeeT4XbFbqmKVns3aPmJ6ehr3XoJqg3+/HvWIBYaSJkLV7xfTMNAIaAY2ARkAjoBFYLgIpvHm81DkqH33ZJ1OxOw4YxR6bvLS/TlpqvMvtUh+vEdAIrHMENAGyzi+gnr5GIFcErGZYi9R55MVYvUQT6TcBNrNZmms9wtd00whoBDQC6wWBx6lcm1ncz4UoyIUM4HVc6rhcxnrQ+2G5a1psnKXWkH1eLsev1DHZY6+X70mAvPXW23Ls2FGoidpl967d0tbeJqWlpWIp1HaY6+U66nlqBDQCGgGNgEZAI7A4AsmZWRkYjcj7p7skNHWHANmEXJADbeWLn6hf0QhoBB5bBDQB8theWr0wjcDdCFDqeWh7texqufsPPsPR+ZpuGgGNgEZgvSCQgyBgvSxFz1Mj8MgQ4M9NAhsgbt68Ib/59W/k3Xf+IA2NzVLTWCn7d++VA/sPSHNLs1gLC8Vmt4vd4dT5YI/s6uiBNAIaAY2ARkAjoBFYSQR8Lofsai6TYCgGy6t06dPntKH2YVnJYXRfGgGNwDpBQBMg6+RC6WlqBL4qAgUIAnM7LfeQHaaCPNFBYF8VXX2+RkAj8CgReJwUII8SNz3WxkYg++dmMjQh586dkksXTfL50eNSXf07qShH1orXJ7t375IDB56Surpa8Xq94nQWaausjX376NVrBDQCGgGNgEZg3SBgMeXL9iafVJXuuGvODEcvdlvXzTr0RDUCGoGVQ0ATICuHpe5JI7CmEaD35Y3ecekemJDp5Iyaa6GpQCpKHNLWUCIOq1aBrOkLqCenEdAIzCOgFSD6ZtAIrBwCyZmkjI0Nq3/n0K2pwCTvv/8RCJFfS2trk+zbt0/27NmryZCVg1z3pBHQCGgENAIaAY3AQ0SA+WZFdov6l93ydPZZNiT6e43AhkBAEyAb4jLrRWoERMLRhBw92ye///PNeTgKIQV9YlO51PndmgDRN4lGQCOwbhDI3sm+biauJ6oRWASBqakpCQaDkkwmFzliZZ4OBIISn44JSY/FWiYhcuniWUWG+LzFsvuJ7fNkSHVlhZSUlYnL5VqsG/28RkAjoBHQCGgENAIagVVBIJWalcFARG71hZT9p9GcDrM0VXq1CmRVrooeVCOwughoAmR18dejawQeGQKm/Hw1FkkPo9mQ/2HXyo9Hdg30QBoBjcDKIKAVICuDo+5lbSDAD+nXrl2XN9/4Z+nuG3yok5qeisrVq7dyHiOTDOnsvHEXGfL004dlc0uLNDbUKzJE22TlDKs+UCOgEdAIaAQ0AhqBh4hAPJmSczdG5O/fvSjj4en5ker9Lvnpa9tAgFQ8xNF11xoBjcBaREATIGvxqug5aQQeAgIMOj+8s0rqyp3zvVtggVXssYrbUfgQRtRdagQ0AhqBh4OAVoA8HFwfh17zFrg5ZtcBYzbQ3y//+vv3kcdxVl0Gk8kCNUh8xb9+lWucTYYwRL19R5t899vflde/+U1FgOimEdAIaAQ0AhoBjYBGQCOgEdAIaATWGgKaAFlrV0TPRyPwkBAohNqjvaFY/ctuDEjXTSOgEdAIrBcE1kE9e71AuWbnOQvWggTADKyaCpBJQUIgbyF2Y24FxvGh8KSMjvZLIh5Xr9gdLikpLhO73YV+Ctbsep1FRbJjd7s4iu71ql7JSUdCcRkbHZXevq4H6pb5IG6PD/kgtWq+ba2tKh/EjaB03TQCGgGNgEZAI6AR0AisBQRMBXnSVO2R7z/XIvFEan5KRdgUWl2qN2yshWuk56AReNQIaALkUSOux9MIrBICMzMpmQjHZSJyRwLKqVAZUuyyIvRUkyCrdGn0sBoBjcAyEVhgk/8ye9CHL4YAiYTp6SmZik6J3WYXq9Wx2KEP7flYDJ7Nty7LhQvHZGIyKC3Nu1FkfxZ5E15ZSM3BOY+MDsjZM0dxznG5ffumxGJTan4eT6k0NLbIjh1PS9vWvVJUtDYL9W1tW+Xf/w//I+Z999/olQY5GAjIL//pl/LP/9y1rK5dRW7g2KxIjx3t22T7tu2yZesWKSkpEbPZIgwb1U0joBHQCGgENAIaAY3AWkCAtY3mGq9sqvLcMx0dgn4PJPoJjcCGQEATIBviMutFagREIghB/+B0t5y6OiTR6XT4qa3QJNubSuW1pxrF7dQ2WPo+0QhoBNYHAloB8vCuE1UXZ899JufOHVOkwd49Lzwy5cTMzIwMDN6WM2c+lg/e/7VcuXISSo5p+dqRH0hb+35FgGQ3kh99/d3y9ts/k48//I1Eo1HxVzRIub9WpmNRGRnpkKtXvpAzpz+TV179sTz7zLfWHAlC8sDr9YHk8UkK68mH0oVfV7qx35HhIfn4k4+x6cF03yB0vm63O+4hPZpbmsXv9yvSg00THyt9lXR/GgGNgEZAI6AR0Ah8VQT4Lmo6PqNqIMnUHQUIc1G5AdQKdwzdNAIagY2FgCZANtb11qvdwAjEEjPS2ReS45cH5mWgFnNa9fH1vXXi3sDY6KVrBDQC6wsBvdf83utFd6jFauYLOUctdCyPo4UUyQ+SCSXFFfLE7mfuS4Bk971Qv/fO9t5nqPq4cuW0fIhxT518TxEZJD/i8QQUKbF7T5h7hmqVE8ffkXff/oV65uVX/lL27T0ipWU1kkhMS1fnZRT8fyfHP39X3n7r51Jd1STbtz913zUtOthDfMEgEvIlfXcbX1d6SJPZvGCXBuFBkqkYqg4qPfY9sVeFnBukR2FhoRi0jP4ZXBBG/aRGQCOgEdAIaAQ0AmsAAbpfnL81LG9/1iXRRGJ+RtWlLnlpf520QB2im0ZAI7CxENAEyMa63nq1GxgBq7lAWus8MhWrnX8TYEMhpLnWI3xNN42ARkAjsF4QWPm98etl5YJMjBkU9tOEALMx8vNNMjU1KYlkUswm7tq/k3Vh2FklEnH1utF4nNVqxy7+O8o/HsvjmJ/R3387TUBgHFpJMYfDbLbeRRoY88jsm/1SGVBYaL9vXkc22iRRRsdGFPlx7OjvpL6hTXbtOgQlyKdy/tyn2YfPf88587zzF76QSVhl7dv/ohw58pdSX9eixme/1VX16vje25eks/OSXLt2UjZtal/UTmvRwR6zFxhobpAe5eWVsvuJ7bJv3z5p3dwqzCOprqwQf2Ul7if7XSvXxMdjdiPo5WgENAIaAY2ARuAxRCA5MysDoxH58+U+CYRiYp7Lgaspm5ADbeWP4Yr1kjQCGoGlENAEyFII6dc1Ao8JAkV2izy7q0b2bvHD9iJdPmQ4WKHZpGSgumkENAIagfWCwEYtwrLgPzEZgELiAwmFAtLauhcKiZh8+unvpa+vW3bu3C/Pv/ADKSutVOoHBl1fvPi5ysUYHupVl5eZHqWlFbJt2wHZueuwFPvKFbFBJQWtr05+8b5cv5omFM6
ePYHn41JRUa+OZb9sVGv0IGfj9JcfyJWr52VifFQ97/aUyJbW7SimvyC1NZuWlR/C+VosFnnha9+XQ4deE4+3TDo6r6p+79cmxkdkaLBHHcKsD7fbp2yZqEThP4anV1U3wvaqXCLhy8CpR6KwxlrITut+4zxOrxUXF0vzpi3zpAfzPBob6qWkrAy4uB6npeq1aAQ0AhoBjYBGQCOwQRHwuRyyq7lMgiBAjFbpK0LtI23juUFh0cvWCGxYBDQBsmEvvV74RkTAZILnpfVusoPP0RdcN42ARkAjsF4Q2MgKkImJAJQSb0p31wXZA6unkZEBlXHBRmIjHofaA2TC9evn5V9+9bdy7swnYrPZxO5wQJ1RBLIgnYlBm6lnn/+uvPLKT5RKgqHn166elj9/9hZCxLuFWSDslxkaTZuegA3SLgwAi6xQUD7//I/yzju/gL3Upfm+OT775ngkTl7/9k9k546DOZMgJcVl8uprPxGHo0iRMmOBoZxuRzNIE6pZcmkWi1mRLCbTxiX9eS+88sqrcvjwM1JXV4vsEa84nUU6yyOXG0gfoxHQCGgENAIaAY3AukDAghrH7s0l0lRVNL/5kxPnBlCfy7Yu1qAnqRHQCKwsApoAWVk8dW8agTWLQDSelBMXB+RiV+CuOTZXueWpbZU6BH3NXjk9MY2ARiAbgY1O2VKBMTTUL5cvf6ZIjacPf0vq61ugdNgkbpdbguNjIEl+pXIvfN5i+d73/1q2bT8IOyM3QrBvK8XIB+//SuV8VFY2Ievj+2K32ZGNcVBi0xGVp0FLqR27nkEw97MgVipxTJkiVi5dPilvvPF3yk5q165n5cUXfwDLqq3KluvatTMqvJwh5mw+EBlNje1L5m1QqeF0etQ/WlexkcjJpXk8pVJWXi1mS6F0dlxXFl4+b6myv2KjRVdfbweImyGl+qisrFdrfdCsklzmtJaPsVptuBe2q40POsB8LV8pPTeNgEZAI6AR0AhoBB4UAb7H4cbP7M2fD9qfPk8joBFY/whoAmT9X0O9Ao1ATgiEowk5eRVBYCduzR9fiOyPp7ZWye7Wch2CnhOK+iCNgEZgLSCwkRUgBv4MB5+KRORbr/9Q2V65itwgGtKZIAOwhBodG1bkB1UeTx9+XcrL0vZVFf4aqCCscuvWZZWvcf3aWdm//4iyt2pv3yfT8ah8ceIPsImaUnZWzz7zLaWwoJXUyOiAssgi+dGAnA6qPBiSbgH5QEKBtldOh1uRDVSP0CKrwl+bU96GQXxwfbmKEnmO11MsTx54UW7d/FKN+dFHv0Z4ehx2XKWKsBka6lYh6GOjo9K6ZZ8igphnslEbCwIPK2B9o2Kq160R0AhoBDQCGgGNwNpCIJWalcFARK5mbf50OszSVOmVYvfGfS+4tq6Uno1G4NEhoAmQR4e1HkkjsKoImPLzxWY1SbnXLtH4jJqLzVIg9ixLrFWdpB5cI6AR0AjkgMBGV4AYELk9fmR5PC2lJRXzuRd8raSkUl5++d/KwadegjpjiyIJDMVDPv4WlJZVKdUEGy20IpGQ5JWJClQ3mk2FpFsV+UGCI5mcgXqkTy5dOqUO2bX7oLRt3auC1I2+mS9Cq6zqmjaoMX6rrLAOPf0d5G945/vN5QH7o1VVLo3jU6USjkzI22/9XKlXzpz+TMpBvEwj64MWXgb5QcKGJA0zT3TTCGgENAIaAY2ARkAjoBF4PBGIJ1Ny7saI/P27F2UoOIXc0/R7v3q/S3762jYQIBWP58L1qjQCGoFFEdAEyKLQ6Bc0Ao8XAgw6P7yzSmh5FUchi81iKpBij1XcjsLHa7F6NRoBjcBjjYBWgJAgMKsiP8kMKiEyLZ28CCPfu+cFSaWSCCyfkqHhAZmamkAI+KS6L8YRWj45EVSPaae1VGPf7Gt4uBeB54Mg0+2wxKqQRDKpMkGyW2VljbKkGkdAOUPKqTpZDulABUiuFlgMhjebLUp5QjssqlMGBzqh/gipaZH8YGM+SpHTq1QyumkEMhHo6U9bg3p9Pp2Jpm8NjYBGQCOgEdAIPEYIGBs/pxMz8yTIY7Q8vRSNgEZgGQjoT4HLAEsfqhFYzwgUQu3R3lCs/iVn0uVDhoCxFRTkr+el6blrBDQCGwwBrQBJX3BXkUcFepMwyCRAUqmUChG/dPGEXLz4ubLDmgDpQTKEjeQAiQHaaOXS2P/MTFICgQGJRqMqH+TYsXekq+v6gqffuHEeZEtYjRMK30uQLHhSxpOZa1nq2OnpKTl69Pcql4TkzDPPfhuqmAMqt4Str/emnDz5iRw7+juldvmLv/hrZfVF5YhuGxuBUDQup7E7tHtkXJ547jvS1rZV56Js7FtCr14joBHQCGgEHhMEWOfYXOeTn77SLiFYgVvM6XpHkdUi1aVFj8kq9TI0AhqB5SCgCZDloKWP1QisYwSmYXvVPxqW0Yl0AcxYisdpldpyl1hBkOimEdAIaATWAwJaASJKYUGbKLPp7rdyVESQ/Hj3nZ/LH979uQpLLy+vhFqkUSlGjDYV+eL/Z+894KO8znz/x9LMqPfeOxIqSPQuMHbAxiaOK7bj2Mna3mya72b/2/fu3t3Nvf5ns8kmN8lu4k2cjRNv4oJbMLbBNgZMBwMCBAhQQQj1Pqqjwj2/M7yjd0Yz0qjPq/c5n8980LzllO95xYzO7zzPb1JTDW8QCCdVItKiob7CZR1JSSlChEgnH5Ofy2tcnXDXAwRjrb5+VRi+vyUjPyB+PPbYt+3SXA0Ij5L09AJ66aV/kebsCQkpwiw+Q3qecNE3AYgfJRevUt76bUIY204RERH6BsKjZwJMgAkwASYwTwgYxAbP9PgQShOvm8IPRCm3CS803kg1TyaZh8EEJkiABZAJAuPLmYBWCXSLnQ8ff1ZN+0tqbEOAB0hBejR98XM5LIBodWK530xAhwT4DxfXkz44aKFjx/ZI8aO1rYXWb/iCNDJPSEwnf/8QeSPSYe3Y8TMhjrziuiIXZ5D+Cq+773mCCgvXu7jKehjiR0pqjthVP7EoQ3cjQDDWqsqLdK3qnOzTsqUbRbqtRLt0W4j0yMzMp4yMXLp48YT0MIGXCXxT1MbrYw6ET84rAoj8uHqtQYgfFbfEj0cpLo4FsXk1yTwYJsAEmAAT0DUBmKCbeyzS/2NgaFiyMApRBMJITLg/BbAPqq6fDx68PgmwAKLPeedR65DAoEiJ0trRT9UNnWQZsH4JQChoWJAv4RwXJsAEmIBWCHAEiOuZMgufj8tlZ2Tkx4LsJfSF+56mRYvWiFRZ1ig/iAuNTbWyAndTYOEeeGcEBYXbGo6Pz6ClIroCBuljFXfFDHUd7kaAWNNy1UqhJzwsgoJDwqVxu2OBR0hwsLXvSloupAmbiC+JY538XpsEFPHj05IR8SMpKZlTX2lzOrnXTIAJMAEmwAScEoAJ+tnyVnrtk4vUZxm0XRMfHkSP3LmA8lI56tMpOD7IBOYxARZA5vHk8tCYgJqAr9GbclJCqcUcZzvsZzRSVnIo4RwXJsAEmIBWCHAEiPOZQkqonp5u6jS3ywusJulJUvxQhI
ihoSEZAdHYUEOIoHC3eHkZREqrBIqIjKSammqqrS2XniKOXhqov6OzVdQ9IKIy/ETUSbDbQoNa+EB6L2dFfQ1EGQgeiP6AN0mvGDvM2p0JG339I2bvk0nL5awvfExbBFj80NZ8cW+ZABNgAkyACUyFQGtnN12qbqVWc5+sxujtTe1d/WJTaOpUquV7mQAT0CgBFkA0OnHcbSYwUQJB/ibauDiJli+MtbvVxyh29YpzXJgAE2ACWiHAESCuZ0otHPT39Qoj8n55sWKUDn+Qkyc/lh4eShkYsP5hiPeKOACvD6XgXogKUdFJlJiUR5UVV+jihdNUXX2VcnKW2EWXKP4jFZWXaO2au6i4eJu4N8BWl6sf0E9EdCilu9tM6L/6fW/viIgB4QOiTHh4HPn5+ckokOrqMmG6vo4iwqNs9yEFQl19jRBsrtOApV/6kgQEui/KuOovH9cWARY/tDVf3FsmwASYABNgAlMlEB4cQDnJ4aMiQMJDeO1jqmz5fiagRQIsgGhx1rjPTGCSBAwGr1H5LnGMCxNgAkxASwQ8NQLE22Akg4+/QNkpF/MdoyNmmjE8LRB1ER+fJE3S4Y1x/vwR2Q8II+3tTXTqs4+FGfhBKRr4+weQ2dxAN2oqKDIynkJupYkyGoOkWFBbW0U1N6ooRKSW8vfzp8iIKClqXL50XPppfPDBf8shRUUnyH9R/5HD70v/EZRlyzbKf8crED8qqy7RubOHhGBjFWM6OtuoqalCpumquV5Ke/a8LPoXJqsKD4+n3LxVwu8jibIWLKaCwmL6aM8rdODAGxQeEUv5+aspICBIXqv0qeT0fpEGK4yKilaK+0bM4Mfr20TOQ2xRBCeDgzn9ROrha6efADw/OO3V9HPlGpkAE2ACTIAJeCIBH+F1uiQ7khYkrxLfyUfSfXsLD5CQgLHTt3rieLhPTIAJTJ0ACyBTZ8g1MAFNEOgVuS+Pna+j81Wtdv3NSgihNQVi4SuQvwhoYiK5k0yACZCnRoDAWDEiJIj6+stFGqoO8vUdP/JhstMJgcJZQcopmJMjQuNsyUH63cvfExEf+8jHx1emvULaqry8ZbQwdzG9v+tlGc3xzjsvUmtrHW3e/JiM8kDqrPPnjtHhQ+9Sc0ujFFRuv/0hykjPp6LFxXTX3U9KkWPPnt/TlStnKTllgewK6kdkCcSVjZsepMWLN4p2IQiNXSAWlZWdpt/+5l+pUwgfSkE6r0FxrvT8GdlPpcDbBH4fEEBiouNo06YHqLOjTQg7++R4U1ILRKRHHPVb+mx9wr3ri++jlavukWm5prsgSsYsUo+Zza2iTxxhMt18J1ufOvIja+VdtH37o8SeH5OlyfcxASbABJgAE9AGAWyW8hFpvr29vGhI5XeK9ybeAKqNSeReMoFpJsACyDQD5eqYgKcS6OodoBOXGmnXsXK7Lt5emExLcmIoxFM7zv1iAkyACTgQ8NQIEHQzOSVJmIWfElEV5WIhPN7mvTFdk4jIhlWrNlF0TCJlCyEABt/qglRVebnL6T5hfh4SGkkN9dUyggJRHaGhUbR+/VYhAmwVXiEdZOnvp9LSkzIKBNEeA4ODMspj44b7ZPopRGDgXpQBi9UvJCoyju7e+iTFxaXSocMfyPrLr34mr0EbhYs30KKCFbINCBSIShmvwMsjKiqeVqzaQv391ggQtO8jolkcC44nJKSIKJCYW236SDP2oMAwWrhwsYhMOS2jPtB3V31y5hHi2M5k3kMs6rfcoOwFKwjRQFzmloCj+PGlL32JxY+5nRJunQkwASbABJjArBDAZqn+AeFLJzw/1EWJAPEVESJcmAAT0BcBFkD0Nd88Wh0TMIjdDn6+BgpWhXz6iQ9+f19epNHxY8FDZwKaJOCpESCAmZKcLBbnw6mi4pxYkF8yrVEgEBMixML/gw9+Q6bYgnBgMJhGiSxI9bRu3VbKy18phZh+ixATTH4yugMCB6IyhsVuuO2P/qntPASFIOGNgXRZq1dvptS0XGpouCafD5xD2igv8TmCPkQLseL22++X0SAwVDd3WaM20EZCYgaFhUbIfrkjfqB+tAkRo6hwrdvPIzxAFCED98OLJCMjV0SsCPGj8bocMwqEEaTommif3O7IrQstIiKnqvICDQ/1UEF+HiEaiMvcEWDxY+7Yc8tMgAkwASbABOaaANJe3WjqouMX66m3f8RjLjrUT3qixkcGznUXuX0mwARmmQALILMMnJtjAnNFIMDPSMVFCYSUV+oSEerLeTDnalK4XSbABCZFYPyYgklVOy03RUXH0MrlhfTajv0iGuEULVq0xrZQPx0NYNHfYMCuNZ9RwodS/02hEEEUgFCBiA2lqAUJ1OPqPFJ3paVmU+qt1FbOhAylftRxEw3eKrgW753dM9b4rT4l1jGpg0ZUVduM3HFefRz1YjwwW0+I9xevFKdjHqv9qZzDeOGVcvnKKbp943JKSEqaSnV87zQQUDw/kPYKkR8pKSPPxDRUz1UwASbABJgAE2ACHkxgcOgmXaxqoZc+KCVzz4Ctp5mJoRQXGcACiAfPHXeNCcwUARZAZoos18sEPIwAjMAWpUdSflrEqJ4hFJQLE2ACTEArBDw5AuQ2r9toxcoVVHb5Mh0/sYdCw6KlkDBRQWCsuXBc/B/r2vHaHev8WOfUbTpe5/h+rP6pzynjcjW+8c6jrsm27W4fnV3X1FxHx47uEpEmvrRmzdpRacmc3cPHZoaAY+TH449aPT9mpjWulQkwASbABJgAE/BUAiaxYSgmPEBEQY+kwQoTvqcmIy+Deuqccb+YwEwS4N/8maTLdTMBDyLQbxmi2uYuau7osetVaKAvJccEE+fB9KDJ4q4wASYwJgFPjgBB34KDg+nuLZupre0VOrD/LaIN91NyUua0RoKMCYhPzgqBoaEhamltoE8PvE3DN1vooQceFinAkkSqME9+QmcFzZw04ih+sOfHnEwDN8oEmAATYAJMYM4JwOi8MCuKAv197friI6zzkqKC5rx/3AEmwARmnwALILPPnFtkAnNCoFuYoH/8WTXtL6mxa39FThx98XM5LIDMyaxwo0yACUyGgCdHgCjjSU3PoEcefohee30H7dnzMq1YvpkyM/PJ3z/4VgqryYyc7/EEAsPDN4VZew+Vl1+gkyc/JqPJTNsfuY9ycxey98ccTZCj+KFEfrAYNUcTws0yASbABJgAE5hDAvj8jxM+H7EiAsSxIFqbCxNgAvojwAKI/uacR6xTAoPC8La1o5+qGzrJMjAsKZiMXhQfEUA4x4UJMAEmoBUCWvizBX94ZWZlS/+BPXvep4MHd0pj9Jyc5dKU22AwktHAX8O08syhnwODgzQ4OEDt7U1UVnZamp4vWhRLmzc/RjkLc6X4MShMN4fENSg+Pj5aGp5m++pM/IAAyeKHZqeUO84EJkSgv7+fvCzdNNDTN6H7+GImoHUCRhHd4B0Qxp93TiYSm1WaOnrpWn2H3Vmkv0qLDaYQkQqLCxNgAvoiwH9562u+ebQ6JuBr9KaclFBqMY8Y4voZjZSVHEo4x4UJMAEmoBUCWogAA
UsswCYL8+Xt2x+n7OwSES1wgj4+8VvysYRQSHCY8IqwD8vXCn+99nNgoI86Otuoy2wW0R6R9MQXN1NBQb4QtGJsiw893V0iMqSCwsPDKCkpmRclZvhhUYsfKUXrCZEfLH7MMHSungl4AAGIHpamC9SQXv16AAAgAElEQVRx4Dw1DQ0SXav2gF5xF5jAHBBISaZokX4zLDeX/GPi56ADntmkZXCYSsub6VfCBL29y+oB4ic8URER8vS9BVSUGeWZHedeMQEmMGMEWACZMbRcMRPwLAJB/ibauDiJVuXbfzGC+IFzXJgAE2ACWiGghQgQhaXiCbJm7VrKy8uluro6qq+rp6bmRurvs1D/kPVKr5t9NHybVRDhnz2Pg4/cJ+BP0dE5QtRKpqjISGFwH+Y0ysPI5pqz9l/J1WvCg6WkgiB+fPmpp1n8mDXy3BATmBsC2NXdXXuGrn5yxip64P9jbwPdJj5jgzjqbm4mhVudMwJmIQTeFN8rGw8dkq/ApUsodd1aMgaFz1mfPKlhbJJoaO2mVnMfGb1HNnxaBoRoyoUJMAHdEWABRHdTzgPWMwGDMANz3G+MY1yYABNgAloioJUIEDVTpEeKiIiQr6ysLIKBNsqgSKtkuJUKi3+2fi31JA5KX5R/vcUf0EajyWVkR2BgkJxfb5HijFMwzdz/Ko6RHyx+zBxrrpkJeAoBRH00H/5ULvRGpadT8KZN5OPnT/h/WflM9ZS+cj+YwGwQwPMvdmNQlEjD2dzeJn83zn92irKffVb30SAGb3iABNCdS1Kpp8+amhRzEhHqR6GBjisiszFb3AYTYAJzTYAFkLmeAW6fCcwSgV7LIB07X0fnq1rtWsxKCKE1BfGcB3OW5oGbYQJMYOoEtBQB4my07A3hjMr8OAbRg+d35ueSIz9mnjG3wAQ8iQD8lar37qEusbgbLaI9IsWir1JY/PCkmeK+zBUB/E5E3nMvNV28QGW/+AVlfvnzFJS4eK66M+ftYuPRkgUxlJcWOaovPpz+exQTPsAE9ECABRA9zDKPkQkIAl29A3TiUiPtOlZuZ4K+KjeOluTEUAhTYgJMgAlohIAWI0A0gpa7yQQ8nsDpS9c57ZXHzxJ3kAlML4GGA3tt4kdMRCRHfEwvXq5tHhEITk2Vo7n66z9Q/v9I0W06LPytMDh0U0RaD4+aXRZARiHhA0xAFwRYANHFNPMgmQCRwcuL/HwNFBzgY4cjIkiEznJhAkyACWiIgNYjQDSEmrvKBDyKwETED+wYHxocSXsx3kCQtgw7RrkwASbgWQR6Gmptaa9Y/PCsueHeeB4BpMVCSqymigpq2LmL4h99QpcpOYfEd4BL1a104MwN6u0b8fwID/GhO5YmU3o8b//0vKeXe8QEZpYACyAzy5drZwIeQyDAz0h3LEsipLxSCnY/hIs8mCEOoojHdJo7wgSYABNwQoAjQJxA4UNMwMMIVLd2UlVDBd25Ko9iTMYp9c7R8+OJJ54a0/C8paWFPtm7j86ePTluu+auPvIPjKQtnyumdevW6XKhaFxIfAETmCMCMD2/ceoz2ToWdTnd1RxNBDerOQIZwiOnfO9eMlw+QrE5azTX/6l2GNEf1+o6aOfhq9IEXSnpcSGUkxzJAshUAfP9TECDBFgA0eCkcZeZwGQI+Jq8aWFyuHw5FjZqdSTC75kAE/BkAhwB4smzw31jAtNPQO35AfEjMyvbpVAB8WPnH3bS0d27KSTUl1L7ol12qKSvmnYdPCrPR4T50RrhLeBF/D+MS2B8ggnMMoG+pjpb6qtZbpqbYwKaJiAN0kWpO1FGYWlLdelPZjJ4C8Nza/YLH5N16RPvfUyanlruPBNgApMkwALIJMHxbUxAawT6LENU29xFzR09dl2PDPGn+MhAgkDChQkwASagBQIcAaKFWeI+6pmAwWikoEBfSg6Io1C/qX2/UKe9moj4URyUT9sKN1Jg9IhZsjInXY1tJJJi0LHf/4TixGJIUGCgnqeLx84EPJZAZ1uV7Jva9NxjO8sdYwIeRiBaiPqXDxyga1WVlJWdoyt532QQJujC5zRCbITot5BN9DAZDZQWG+xhM8XdYQJMYDYIsAAyG5S5DSbgAQS6hQn6x59V0/6SGvElwJoHEzshClKj6Jlt+SyAeMAccReYABNwjwDvz3aPE1/FBOaKwODAAIWZTOTn7TelLkxW/Lg7Yjltyl/pVPxAhyB+/Gj/i7Jvt29cTQ0XWsXuWN8p9ZVvZgJMYPoJmCtbiFKSp79irpEJ6IBAkI81+qH9RiX1JiWTv79+vD+R4SImzF++lIINVPw3hA4efB4iE3BBgJ3+XIDhw0xgvhEYHB6m1o5+amjtpoa2HutL/NzTN0A4x4UJMAEmoBUCHAGilZnifjKByROYrPiByI+xxI+yxlopfjTWVtPm7BVUOJQ5+U7ynUyACcwYAfh/dDU3U5S3gby9pxZJNmOd5IqZgIcTCBW/O+11DdTW1ubhPZ357rH4MfOMuQUm4MkEOALEk2eH+8YEppFAoDBBX54TTX6+9r/2MEX3FWboXJgAE2ACWiHAf8BoZaa4n0xgcgQU8SMyewM98cSX3Pb8GC/yQy1+PJS9lbKjU6imvXFyneS7mAATmFECwzd5u8OMAubK5z0BxQcEA+3uMtPg0DAZvPWxBxr/ewyJ8SILhroYRGosH7H2oRcO8/4h5wEygQkQYAFkArD4UiagZQJ+It3VmkUJtDI/zm4Y3l5ehByZXJgAE2ACWiHASyJamSnuJxOYOAG1+PHVZ90XPyYS+aGIHxPvHd/BBJjAbBEYGrRfuJytdrkdJjBfCPT3jnh/dvf0EX6nDN7WtFjzZYyuxgHxo6K2g45frKfefmv6b1wbHepHyxfGSg9ULkyACeiLAAsg+ppvHi0ToMFBh3RX8n8BFkD40WACTEA7BDgCRDtzxT1lAhMhoIgf4ZnL6StffmzMyI/Ozk7a+YeddHT3bpKG50ucG56jfWeRHxPpF1/LBJjA7BMwGk2z3yi3yATmEQF1BMjAgHAC11EZHLpJF6ta6JW9ZdTZ3W8beXJMMMVFBrAAoqNngYfKBBQCLIDws8AEdELA3GOhzy410PmqVrsRIwXWmoJ4CgnUx24QnUw3D5MJzGsCHAEyr6eXB6cjAv2WkR3epVVNVHKxgiB+PP3005SzMJdgYuqstLS0TFj86KvtIY78cEaTjzEBzySgtwVbz5wF7pWWCagjQLQ8jsn2PcjPRHHh/uRnsqb79hEZMeIjAshk5GXQyTLl+5iAlgnwb76WZ4/7zgQmQKBvYIhOXGqktw5esd1lMnrRqtw4WpITQyETqIsvZQJMgAnMJQHnS6Jz2SNumwkwgfEIqMUOXOtjMspb+oUGcvFKM5k7blLK0hVS/MjNzXMpfiDyY8+e3TMS+aGn9CDjzRefZwJzTYAjQOZ6Brh9rRNQR4BofSwT7T9SfOekhtOTWwrsbvURgWVJUUETrY6vZwJMYB4QYAFkHkwiD4EJuEPAILw+wkN8KDMx1HY5dkMkRgUTznFhAkyACWiFAEeAaGWmuJ96
ErkJggg==)

# # I was working in google colab using kaggle api
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from sklearn.metrics import f1_score, roc_auc_score
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import random_split, DataLoader, Dataset
import torchvision.transforms as transforms
from torchvision.io import read_image, ImageReadMode
import torchvision
from torchvision.datasets.vision import VisionDataset
from tqdm.notebook import tqdm
from PIL import Image
from glob import glob
import os
import time
import copy
import csv

from google.colab import files

uploaded = files.upload()
for fn in uploaded.keys():
    print(
        'User uploaded file "{name}" with length {length} bytes'.format(
            name=fn, length=len(uploaded[fn])
        )
    )
# Then move kaggle.json into the folder where the API expects to find it
# (~/.kaggle/kaggle.json) before calling the Kaggle API.

image_path = "/content/dataset/semantic_drone_dataset/original_images"
mask_path = "/content/dataset/semantic_drone_dataset/label_images_semantic"
labels = pd.read_csv("/content/class_dict_seg.csv")
labels.head()

len(labels)

classes = labels.name.values.tolist()
print(classes)

length = len(os.listdir(image_path))


# ## Dataset class
class DroneDataset(Dataset):
    def __init__(self, imgs_dir, masks_dir, count, is_val=False):
        self.imgs_dir = imgs_dir
        self.masks_dir = masks_dir
        imgs_paths = os.listdir(self.imgs_dir)
        imgs_paths.sort()
        mask_paths = os.listdir(self.masks_dir)
        mask_paths.sort()
        self.is_val = is_val
        if not is_val:  # train split: take the first `count` files of the sorted list
            self.imgs_paths = imgs_paths[:count]
            self.mask_paths = mask_paths[:count]
        else:  # validation split: take the last `count` files
            self.imgs_paths = imgs_paths[-count:]
            self.mask_paths = mask_paths[-count:]

    def __len__(self):
        return len(self.imgs_paths)

    def __getitem__(self, idx):
        img = read_image(
            os.path.join(self.imgs_dir, self.imgs_paths[idx]), ImageReadMode.RGB
        )
        mask = read_image(
            os.path.join(self.masks_dir, self.mask_paths[idx]), ImageReadMode.GRAY
        )
        return img, mask


# # Transforms
torchvision.models.segmentation.DeepLabV3_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1.transforms()  # transforms with which the model was trained


def img_transform(img, mask, is_val=False, size=520):
    # `device` is the global set in the model cell further down; img_transform
    # is only called after that cell has run.
    img = img.to(device)
    mask = mask.to(device)
    img = img.float() / 255.0
    if not is_val:
        trans_img = torch.nn.Sequential(
            transforms.Resize([size, size]),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            transforms.RandomAutocontrast(p=0.2),
        )
    else:
        trans_img = torch.nn.Sequential(
            transforms.Resize([size, size]),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        )
    # Nearest-neighbour interpolation keeps the integer class ids intact;
    # the default bilinear resize would blend neighbouring label values.
    trans_mask = torch.nn.Sequential(
        transforms.Resize([size, size], interpolation=transforms.InterpolationMode.NEAREST)
    )
    trans_img.requires_grad_(False)
    trans_mask.requires_grad_(False)
    trans_img = trans_img.to(device)
    trans_mask = trans_mask.to(device)
    img = trans_img(img)
    mask = trans_mask(mask)
    return img, mask.squeeze(1).long()


train_dataset_len = int(length * 0.7)
val_dataset_len = length - train_dataset_len
train_dataset = DroneDataset(image_path, mask_path, train_dataset_len)
val_dataset = DroneDataset(image_path, mask_path, val_dataset_len, is_val=True)
train_dataset[5][0].shape

train_dataset[5][1].shape

img, mask = next(iter(train_dataset))

batch_size = 4
train_loader = DataLoader(train_dataset, batch_size, shuffle=True, num_workers=2)
val_loader = DataLoader(val_dataset, batch_size, shuffle=False, num_workers=2)

model = torchvision.models.segmentation.deeplabv3_resnet50(
    weights=torchvision.models.segmentation.DeepLabV3_ResNet50_Weights.DEFAULT,
    progress=True,
)

# # By default deeplabv3 has 21 output classes (Pascal VOC), so the heads must be
# # replaced for custom data: 23 classes here
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
from torchvision.models.segmentation.fcn import FCNHead

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.classifier = DeepLabHead(2048, 23)
model.aux_classifier = FCNHead(1024, 23)
model = model.to(device)

# # I use Cross Entropy loss; you can also try different ones, for example Dice loss
# # (a minimal Dice sketch follows the training loop below). A library of loss
# # implementations is collected here:
# ## https://www.kaggle.com/code/bigironsphere/loss-function-library-keras-pytorch
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F

loss = CrossEntropyLoss().to(device)
learning_rate = 0.01
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)


# ## Pixel accuracy
def pixel_accuracy(mask, output):
    output_softmax = F.softmax(output, dim=1)
    output_argmax = torch.argmax(output_softmax, dim=1)
    bool_tensor = torch.flatten(mask) == torch.flatten(output_argmax)
    return torch.sum(bool_tensor) / torch.numel(bool_tensor)


# # Train
from tqdm import tqdm

epoch_count = 30
train_losses = []
val_losses = []
train_accs = []
val_accs = []
es_steps = 3  # early-stopping patience: epochs without val-loss improvement
count_steps = 0
train_len = len(train_loader)
val_len = len(val_loader)
print(train_len)
print(val_len)
best_score = 1e10
for epoch in range(epoch_count):
    if count_steps >= es_steps:
        print("Early stopping!")
        break
    train_loss_sum = 0
    train_pixel_acc = 0
    model.train()
    for img_batch, mask_batch in tqdm(train_loader):
        img_batch = img_batch.to(device, non_blocking=True)
        mask_batch = mask_batch.to(device, non_blocking=True)
        img_batch, mask_batch = img_transform(img_batch, mask_batch, is_val=False)
        optimizer.zero_grad()
        output_batch = model(img_batch)
        loss_value = loss(output_batch["out"], mask_batch)
        train_pixel_acc += pixel_accuracy(mask_batch, output_batch["out"]).detach()
        train_loss_sum += loss_value.detach()
        loss_value.backward()
        optimizer.step()
        del output_batch
    train_loss = train_loss_sum / train_len
    train_acc = train_pixel_acc / train_len
    train_losses.append(train_loss)
    train_accs.append(train_acc)
    print(
        f"Epoch {epoch} / {epoch_count} | train loss = {train_loss} | train acc = {train_acc}"
    )
    model.eval()
    val_loss_sum = 0
    val_pixel_acc = 0
    with torch.no_grad():  # no gradients needed during validation
        for img_batch, mask_batch in tqdm(val_loader):
            img_batch = img_batch.to(device, non_blocking=True)
            mask_batch = mask_batch.to(device, non_blocking=True)
            img_batch, mask_batch = img_transform(img_batch, mask_batch, is_val=True)
            output_batch = model(img_batch)
            loss_value = loss(output_batch["out"], mask_batch)
            val_loss_sum = val_loss_sum + loss_value.detach()
            val_pixel_acc = (
                val_pixel_acc + pixel_accuracy(mask_batch, output_batch["out"]).detach()
            )
            del output_batch
    val_loss = val_loss_sum / val_len
    val_acc = val_pixel_acc / val_len
    val_losses.append(val_loss)
    val_accs.append(val_acc)
    print(
        f"Epoch {epoch} / {epoch_count} | val loss = {val_loss} | val acc = {val_acc}"
    )
    if val_loss < best_score:
        best_score = val_loss
        count_steps = 0
        torch.save(model, "best_model.pt")
    else:
        count_steps += 1
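# # For reference, a minimal multi-class (soft) Dice loss in the spirit of the
# # loss-function library linked above. This is a sketch rather than that kernel's
# # exact code: the smoothing constant and the one-hot conversion are my own choices.
class DiceLoss(nn.Module):
    def __init__(self, num_classes=23, smooth=1.0):
        super().__init__()
        self.num_classes = num_classes
        self.smooth = smooth

    def forward(self, logits, target):
        # logits: (N, C, H, W) raw scores; target: (N, H, W) integer class ids
        probs = F.softmax(logits, dim=1)
        target_onehot = F.one_hot(target, self.num_classes).permute(0, 3, 1, 2).float()
        dims = (0, 2, 3)  # reduce over batch and spatial dims, keep per-class scores
        intersection = torch.sum(probs * target_onehot, dims)
        cardinality = torch.sum(probs + target_onehot, dims)
        dice = (2.0 * intersection + self.smooth) / (cardinality + self.smooth)
        return 1.0 - dice.mean()


# Drop-in alternative for the CrossEntropy criterion used above:
# loss = DiceLoss().to(device)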
import matplotlib.pyplot as plt

train_losses = [x.cpu().item() for x in train_losses]
val_losses = [x.cpu().item() for x in val_losses]

plt.plot(train_losses, linestyle="-")
plt.plot(val_losses, linestyle="--")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.show()

train_accs = [x.cpu().item() for x in train_accs]
val_accs = [x.cpu().item() for x in val_accs]

plt.plot(train_accs, linestyle="-")
plt.plot(val_accs, linestyle="--")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.show()

# # Inference
model.eval()

label_map = np.array(
    [
        (0, 0, 0),  # unlabeled
        (128, 64, 128),  # paved-area
        (130, 76, 0),  # dirt
        (0, 102, 0),  # grass
        (112, 103, 87),  # gravel
        (28, 42, 168),  # water
        (48, 41, 30),  # rocks
        (0, 50, 89),  # pool
        (107, 142, 35),  # vegetation
        (70, 70, 70),  # roof
        (102, 102, 156),  # wall
        (254, 228, 12),  # window
        (254, 148, 12),  # door
        (190, 153, 153),  # fence
        (153, 153, 153),  # fence-pole
        (255, 22, 96),  # person
        (102, 51, 0),  # dog
        (9, 143, 150),  # car
        (119, 11, 32),  # bicycle
        (51, 51, 0),  # tree
        (190, 250, 190),  # bald-tree
        (112, 150, 146),  # art-marker
        (2, 135, 115),  # obstacle
        (255, 0, 0),  # conflicting
    ]
)


def draw_segmentation_map(outputs):
    labels = torch.argmax(outputs.squeeze(), dim=0).numpy()
    # Create 3 NumPy arrays of zeros; each pixel is then filled with the red,
    # green, and blue values of its predicted class.
    red_map = np.zeros_like(labels).astype(np.uint8)
    green_map = np.zeros_like(labels).astype(np.uint8)
    blue_map = np.zeros_like(labels).astype(np.uint8)
    for label_num in range(0, len(label_map)):
        index = labels == label_num
        R, G, B = label_map[label_num]
        red_map[index] = R
        green_map[index] = G
        blue_map[index] = B
    segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)
    return segmentation_map


def image_overlay(image, segmented_image):
    alpha = 1  # transparency for the original image
    beta = 0.8  # transparency for the segmentation map
    gamma = 0  # scalar added to each sum
    image = np.array(image)
    segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    cv2.addWeighted(image, alpha, segmented_image, beta, gamma, image)
    return image


imgs_paths = os.listdir(image_path)
imgs_paths.sort()
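# # The training loop above saved the best-scoring checkpoint with
# # torch.save(model, "best_model.pt"). To visualise that model rather than the
# # weights from the last epoch, it can be reloaded first. A sketch: torch.save on
# # a whole module pickles it, so torch.load needs the defining classes importable,
# # and newer PyTorch releases may additionally require weights_only=False.
if os.path.exists("best_model.pt"):
    model = torch.load("best_model.pt", map_location=device)
    model.eval()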
def perform_inference(
    model=model,
    imgs_paths=imgs_paths,
    num_images=10,
    image_dir="/content/dataset/semantic_drone_dataset/original_images/",
    device=None,  # default None so the fallback below actually triggers
):
    device = (
        device
        if device is not None
        else ("cuda" if torch.cuda.is_available() else "cpu")
    )
    model.to(device)
    preprocess = transforms.Compose(
        [
            transforms.Resize([520, 520]),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    # Randomly select `num_images` from the whole set for inference.
    selected_images = np.random.choice(imgs_paths, num_images, replace=False)
    for img_name in selected_images:
        # Load and pre-process the image (img_path, not image_path, to avoid
        # shadowing the global dataset directory).
        img_path = os.path.join(image_dir, img_name)
        img_raw = Image.open(img_path).convert("RGB")
        W, H = img_raw.size[:2]
        img_t = preprocess(img_raw)
        img_t = torch.unsqueeze(img_t, dim=0).to(device)
        # Model inference
        with torch.no_grad():
            output = model(img_t)["out"].cpu()
        # Get the RGB segmentation map
        segmented_image = draw_segmentation_map(output)
        # Resize back to the original image size
        segmented_image = cv2.resize(segmented_image, (W, H), interpolation=cv2.INTER_LINEAR)
        overlayed_image = image_overlay(img_raw, segmented_image)
        # Plot
        plt.figure(figsize=(12, 10), dpi=100)
        plt.subplot(1, 3, 1)
        plt.axis("off")
        plt.title("Image")
        plt.imshow(np.asarray(img_raw))
        plt.subplot(1, 3, 2)
        plt.title("Segmentation")
        plt.axis("off")
        plt.imshow(segmented_image)
        plt.subplot(1, 3, 3)
        plt.title("Overlayed")
        plt.axis("off")
        plt.imshow(overlayed_image[:, :, ::-1])
        plt.show()
        plt.close()
    return


perform_inference()
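# # Pixel accuracy over-weights dominant classes such as paved-area and grass, so
# # mean IoU is the more informative segmentation metric. A minimal sketch using
# # the standard intersection-over-union formula across the 23 predicted classes:
def mean_iou(model, loader, num_classes=23):
    # Accumulate per-class intersection and union over the whole loader, then
    # average IoU over the classes that actually occur.
    inter = torch.zeros(num_classes, device=device)
    union = torch.zeros(num_classes, device=device)
    model.eval()
    with torch.no_grad():
        for img_batch, mask_batch in loader:
            img_batch, mask_batch = img_transform(img_batch, mask_batch, is_val=True)
            pred = torch.argmax(model(img_batch)["out"], dim=1)
            for c in range(num_classes):
                p = pred == c
                t = mask_batch == c
                inter[c] += (p & t).sum()
                union[c] += (p | t).sum()
    present = union > 0
    return (inter[present] / union[present]).mean().item()


print(f"val mIoU = {mean_iou(model, val_loader):.4f}")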
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/010/129010375.ipynb
semantic-drone-dataset
bulentsiyah
[{"Id": 129010375, "ScriptId": 38348600, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6961845, "CreationDate": "05/10/2023 09:40:47", "VersionNumber": 1.0, "Title": "PyTorch Semantic Segmentation", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 424.0, "LinesInsertedFromPrevious": 424.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
[{"Id": 184689890, "KernelVersionId": 129010375, "SourceDatasetVersionId": 1834160}]
[{"Id": 1834160, "DatasetId": 333968, "DatasourceVersionId": 1871791, "CreatorUserId": 1200915, "LicenseName": "Other (specified in description)", "CreationDate": "01/10/2021 23:24:17", "VersionNumber": 6.0, "Title": "Aerial Semantic Segmentation Drone Dataset", "Slug": "semantic-drone-dataset", "Subtitle": "aerial semantic Segmentation", "Description": "Dataset Resource: https://www.tugraz.at/index.php?id=22387\n\n\nCitation\nIf you use this dataset in your research, please cite the following URL:\n\nhttp://dronedataset.icg.tugraz.at\n\nLicense\nThe Drone Dataset is made freely available to academic and non-academic entities for non-commercial purposes such as academic research, teaching, scientific publications, or personal experimentation. Permission is granted to use the data given that you agree:\n\nThat the dataset comes \"AS IS\", without express or implied warranty. Although every effort has been made to ensure accuracy, we (Graz University of Technology) do not accept any responsibility for errors or omissions.\nThat you include a reference to the Semantic Drone Dataset in any work that makes use of the dataset. For research papers or other media link to the Semantic Drone Dataset webpage.\nThat you do not distribute this dataset or modified versions. It is permissible to distribute derivative works in as far as they are abstract representations of this dataset (such as models trained on it or additional annotations that do not directly include any of our data) and do not allow to recover the dataset or something similar in character.\nThat you may not use the dataset or any derivative work for commercial purposes as, for example, licensing or selling the data, or using the data with a purpose to procure a commercial gain.\nThat all rights not expressly granted to you are reserved by us (Graz University of Technology).\n\n\nDataset Overview\nThe Semantic Drone Dataset focuses on semantic understanding of urban scenes for increasing the safety of autonomous drone flight and landing procedures. The imagery depicts more than 20 houses from nadir (bird's eye) view acquired at an altitude of 5 to 30 meters above ground. A high resolution camera was used to acquire images at a size of 6000x4000px (24Mpx). The training set contains 400 publicly available images and the test set is made up of 200 private images.\n\nPERSON DETECTION\nFor the task of person detection the dataset contains bounding box annotations of the training and test set.\n\nSEMANTIC SEGMENTATION\nWe prepared pixel-accurate annotation for the same training and test set. The complexity of the dataset is limited to 20 classes as listed in the following table.\n\n Table 1: Semanic classes of the Drone Dataset\n\ntree, gras, other vegetation, dirt, gravel, rocks, water, paved area, pool, person, dog, car, bicycle, roof, wall, fence, fence-pole, window, door, obstacle", "VersionNotes": "class_dict_seg.csv added", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 333968, "CreatorUserId": 1200915, "OwnerUserId": 1200915.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1834160.0, "CurrentDatasourceVersionId": 1871791.0, "ForumId": 345540, "Type": 2, "CreationDate": "09/04/2019 10:52:09", "LastActivityDate": "09/04/2019", "TotalViews": 84325, "TotalDownloads": 12575, "TotalVotes": 220, "TotalKernels": 76}]
[{"Id": 1200915, "UserName": "bulentsiyah", "DisplayName": "Bulent Siyah", "RegisterDate": "08/04/2017", "PerformanceTier": 3}]
# # Semantic segmentation using PyTorch and DeepLabV3
# [Figure: embedded header image (image.png, inline base64 PNG) removed]
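# The title above names the approach: semantic segmentation with a DeepLabV3
# model in PyTorch. As a minimal, hedged sketch (not this notebook's own code),
# torchvision ships pretrained DeepLabV3 variants that can be loaded and run on
# a single image as below. The checkpoint (deeplabv3_resnet50) and its 21
# output channels come from the COCO/VOC pretraining, not from the 20-class
# drone dataset described in the metadata above; "example.jpg" is a
# hypothetical input path.
import torch
from torchvision import models, transforms
from PIL import Image

model = models.segmentation.deeplabv3_resnet50(pretrained=True)
model.eval()

# Standard ImageNet normalization expected by the pretrained backbone
preprocess = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        ),
    ]
)

img = Image.open("example.jpg").convert("RGB")  # hypothetical input path
batch = preprocess(img).unsqueeze(0)            # shape: (1, 3, H, W)

with torch.no_grad():
    logits = model(batch)["out"]                # shape: (1, 21, H, W)
pred = logits.argmax(dim=1).squeeze(0)          # per-pixel class indices, (H, W)
# Fine-tuning on the drone dataset would replace the classifier head so that
# it emits 20 channels, one per class in Table 1 of the dataset description.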
xLMpRs9/hcgyx0yM10i0uNiTPXW+VzNVcg645X83J8DmNe+pI9cLZBty07PVGSEmKkbbx/IIv5AzaYl2dVvZBF/+D8kMW8/EbNVSML5r5k3UoWYwSyWBvU4ibpakzb51pSYozSOUbqLHMC6xvGHsaImT9gY2Rv3O32zjUtq+qF3qYvp5PtGxiWE5daJsxL9I+1XjDHmoF5aWSxFqYnx+q+nE7WjBEju+/ETc0Rcw9tM3PNzGGMBQVJLjd06DFtlzUcTb8rW5Vaa9p0/6Avoa+pd4Ls+NqNvsQYMWywFtrnpbUvsUZa11iMkX3Hb3rXTYwRFIw9I2vGiF02PzNJ1HDS48DM4Zx0tyTEu8TMtXOqHowRzB/MtQNqruO6Bh0gm6hkrfMHsrgGDqtrJ2TBEW0DR8hiPIG5uV5mq8+xNhxU49Ewx3UN/XNFMTfXVjN/+u2yaq7FKNmrPmRx3UabzZpsvQbZ106ss3faPNcgXNsgm66ueejPk2rdNesh1k7I2tctX7Jm/bavnZDF/sKss2Y/geuKde001xWsh2bttF6DrPVOJ+uzXsuabOq172n8Xdvs1ytzHbTvacx8m27/g77v7vPsaaLH11kz33ztaYysdU22zgvr/gey2NPgodVptcbb51Dj+N7D9Dvm0D3L/gf1mjkEWey1MC8wRszcxHXl4Pnbel7YZa37HxhsrNegguyJ1yDrmuHv2mbd05j1BddZ+54G+x/r+m3WTl/XV8w3rBnWdQvrLPY02CNgjbCunXZZrHHt3Z79hFUW/W69XtnXWbOnyVLrC9gY2dvqWo/rCtZDc72y7lNwvTLr7GxkcW2zXit87Wn8Xa+8exrLdXCq/Y99T2PGSGKcS68j1n0K5g/WOHtfYn9a72NPA2OaWWfNNci6pzHjCfPVLluQlaSvFWYtxDhNUXsU9KV1/4N6IYu55m9Ps1fNCcwfrQPud2z7H+yrsP/BGMH1ysw1cw0y48nfnsZcV8x9iVmPMdem2tOYtRtjJDU5TsDc9Dv2nGb/Y9/T2GVxr2Hd01j3P0YW92EHz93W9xpG1rqnwXjyJTvdnsbUi2ucmT+4b7Tvf/DdTbUHs+9pfMliT2Pmj789DfZV2emea6CR7erF/Ues1sPMVYwxc33G/snsQ82+zL4GmPtZI2vuUe31Qq+pZPFdn2Xds87XQPrC177V3hfHLjWquXC/zZgr8eplJJzb19zFeGpSzwpuqOvAwOCoYN4aI7mebCwkQAIkMEMCg4OD8t5772mbRmFh4YSj6QEyQ5gUJwES8E0ADyawsbuEh1fqpnD7+qX6gduNO53y2kfn1AazS9ZVZEtulltvvC/Utcqv91zQlb3wcIVUFWdIq7oJ+Eg9+D94vlEql6ZLRVGaFMemeGUH1Q3Oz55epb+zy64qz9KbJtT7P947I2nq3H+jHrqU5qVIp3rQ+vZXV9TNQ7PsWFcsK0vTxZUUqx/Q/ErpBtlkJVucmyy96mHiW19e1jc3kF1bkaEf3J+73iL/33un9Y3O/57i9ilbU5ml23Pw3F1584uLWhbnx00QHgD86ycXtEHo2c0V4pGNkn3HGpRul2Wpeqicr9hAFszApqt3UMtWLs3Aq0ryqWKz79RNyVUPeiGLhwhXG7rlf/zprJb93mOVUlGg2qberDWyYAU26Isrqk3QoaNnUP5q9xolm6aMMcNa9sD5Bt0HpQWpWrZO3ZTYZfHw1CpbpHjhodZF9eASsv3qpvKnT1XretE/hvmWlfkC2VTF/MSlJnn9s4ua033Z/kmyGCO+ZO+092rZWmWQWr88R9eLMWJk0ZcZqn+K1AOSFnVTYWQfrymVkrxUfV6rLPp9OlmMEdyMG1no8MGh61oH1AvmI+qJH/ry0+N1WtboAOa//+Ky3FUPCSGLvsSDFLss+vLGnR4vc8iuKsvSD6wh+6dDVyf0u5E142ltRY6WhdEKsphr6Ev0D2TNXPvhzkqpVv1s+h3jCbLL1DjxzFePLOYaxsiq0kwt+96By955uW55tn7Ij7ahXsjiAeCK8XmJ+XPmWqtsrMqVGvWTqR5cgjk4ovztiw/IssI0PS+tsqsqsiRBjfMj6qHebz4+r703kpUBBPMS4+nf9tbqdQTjqao0Q6JTorTsv++7qOf+CsUWcwP9Y+Yw5g/YoBhZjJGS/FT1IDpRz0sji/mD/nGpt/eMLPoSsnjgbJctU3MbHgBmrhUrY09mWryu1zp/dm9dJkuzk/UYsc5LI2vmD+YlZFEv1qHZyIIN+h3zxzovsSZDPzA39UK2MHvdJFmw8iVrrdeMJ8xhyIKNdb7r8aTqMf0O5n/2RLWUK92sHE1fYjxZ+xL6QhZz2PQPZDFGTF9ijBjmmD/od4wRzDVwxPyBJ6EZT5A1a2xrV793XkIWY0SdUj3kbdBrN2TR7xhPkDVrN8aIR3aJd52HDNqbrh4UmTmB9fjlJ1fqNRXzA9e1/acaZJm6plWrOZWZGq9lca3C2/J2WXMNLM5XY0rJoi8hi/KDHZWyXNWD+W6ul2CzTq2H4Gi9tmL+6GugGltWWTPXcA00fWnmGvwN0WbTn5hDWOOsa3KCuiZijStQ16H65m7NEnMTslhn8bDDvsZBFmuRXRYP/aaSNWunWb+NLNhiP4HrinWdxR4BbbaunWbvYZXFfMN6OJWsWWdxbYMsrm32enENAhsrSyMLjoa7WZOxvmA/YV2TcZ3GWm6YmzUZ6xrqNX2PNdnsf6zXNuxpROmAes3+x1yvsHba9zSuaM/1FXsajF9PXybp9cGss2afggdxZg6ZvUe+2qeg3617GswLvMFsvV6ZOQRZ657GzE2sRXuO1un9j5HFnsbImjV5UI1763ro6xoENriuYL4ZWeu1zbrOQhbri9nTmOsg1ji85OBz7VTjxex/sPcw66F93cLagX63rluQRV9C9t2vr+p5jAJZs5/A/tSs31ZZzLXv71jhXTtNv2PdwryMj4v21ot1NikxXu8Nsc6afYqRxd7Qem2zykJfGH6mk8W1zb6nwfXKOod9XQfNngZrkdmnmDUZ10zr/tSsyZC1Xl8xRrCfMOssGOIahD2NvS+xJmP/Y5c1exrrvMR+wuxpzFzDPgV7GrN+Y03GeNLr7Pj+1OxpMpXxA+PJzDWz/4Es7jXM/DF7Gqw5kAXHK/Ud8ui6Qn1tw/dGFvcPWckJev+DMWLWTcyJmhW5eu+B69V7B67oexi0F/MT/W7mMGSxTmJe4r4EstY9De41zDjFNQh7DxSrbL7aS2P/Y50/uAZBFntZ694QsugfX7L2PQ1kYdzB9crca5h6rfsUrHd2WazHvvY0uGcryUvy3guiXhTsPex7Gqss9MWag7k2lSz2NGb++NvToN6KQnVNsOxpsF5UFNZ41wCzZ8U1F/d81n2O0cu6D8W5/kJtUMy9pHU9R3v9yVr719SLfb51XoEv5qu+n1Rj0d4Xpt9wj4hxa+b2GbVemfs+e19AFm3G/hK6YR+Ivb39fsTMXbxkhRfQ9p28pS5jMfL4xlLZWJ2jeEXrF7CMpyiMvSwkQAIkMFcCNIDMlSCPJ
4EIJgA3WhS8IYiHOGYzjg22ufmIVW/yYlOJTRTeWDVlUL0FiBsLHGcteEsMstYCWTxQQsH3JuyIkcVmGAYYFF2vqhOfWevGufFmYP/w/brxd3ffsN6UmoK6cawvWXwGnYfH6zCydn37BzxtwM2MKUPDI7peyOLtJlPw/+FRj77eD9EOdR57vfgen6Eeu6zWV53XWvRnFjb4znDBd9YCDhP6x8JwOllTL/rIKutlrvpseLx/0GdoG8pcZKEv6jLF1Ds4FK37B2+84W1q6OBrjEFXjEtTjCzqtRZdr2JhdDbf+aoX3+H4tKT7YwSfmXFu7XerLP6v9R0fV6jDPkZMX5rzQ9aMU/MZfuM4yEI/Mycga9cfspgL9nlpZNFme7/7Go+odxIzNeYwpq3jCeezy0E/tAGyKLjRAQcUc268LYwCWTMnJvb76KR6IW/mmj54vPSrN8vM2jDhczPfLfPHyOKcmLsoZg7buYAjPrMyNuext9nI2uewmZc4j3d9G6/XyOJz8LDKGnlvveNzYoIs5sA4R9Nu0wZryANd7/jcNHKmXvO30c0uh++t892MPTN/dN1KB3M85PWYtsxhfGafq5iXpi/tsob54JDR7v5cw1o4oo6Njo1S5/CMZavOWI/M2IUsHhC5oj3rAca5XRY6aH2tY2R8nbf35f1xOnGdxjjHd2Yt1OuKZZ5BBxS0077Oo999zUmvLK6L48dPdW01/Y65ZoqvvoSOpj+ta5FZZ+8TB3MPd/u8wHH29dCsLzORtc8hnBuf2dcXtMM+fiCLc/mUVfK+xpS1bWadRd1WWes6a/rTjEccb5XF/40OZl7gb1yDzB7BOi/MmmyVNe0y3PAd2mS/VuB7fGbd02h9xsevtW2GjXX8WtdZ+/XKPiZNvfjcyGLO4f9G1qydkDXzwq6D2f/YZdGWCfNt/Hrla75Z54bel42vnfZ+1+uQfc85Lmv08oQU9Kyz6CPr2ol6wRd9avpHt228TiNrHROTZG1rrNl7WPvWrAXWuWnGiOl3vcbpOe9Zt+x7GugFWehm3xviM1/zPhBZc6ydo3W+mzVZ1+eDuZk/dh0gq/n62MvaZc3fhhva66/ffclC3r7/0W2z9aWud3xNBk+zzuIzszZZ9ylog9nT4FhTrHriMzPX7LLW65WVsZk/djZmvmMcmWKda2Ys4Ttf/W7WBpzXWqyyZl/m7R+brJ6reo0Z3ytZ9inWeq37H+t8N2PElyx0miRrmUPg6K1XzU/rvNT1BiCrr6+2eYnzWuu1s0Hdplj3hjjGFLOPxN/efd34PhSfmXEzYZ/j43jN1rKH8/bZLGVxbmtfGH2t+0vTF2Y91fPS0mZdxzhfX2ukqVPLWdZL6/3I/TXvnvIWVp5AysiIa+Ly4jT9kmBb57CcUp7cGIt4kQ8vq+F5AwsJkAAJzIUAk6DPhR6PJYEIJYCbBrhBv/nFJR0aypSMlDj9X7xZiTcREQoLrtNwDY8Zf7gUCDJfslZjQiB1BCKDt9ZnWuwPFmZ6fDDkQ0EnqzFhrm2MUW+cBrv4G0+zGRNWXed6fDDbbX9ggXP5mmtz1WGqsTAffBLUmmJnHgpzYK7c5nI83ppDsbOZS53THTsb5ngDeTYlGON0NnrYj/Gnl7/P7cfP9O/5mD8zPeds5RG+ZSYlmG1D6I1QK/6uQYupJ7w9nFrmc/wgHFIol/lsayi3k7qFJ4Gp9ojz3eLZ7FNmo8NCtmk2+oXqMTCuwcu8vrlLGyTh7bJiaZr2GLmlPAd/91mtvPbhWfndJxd1eE8WEiABEpgrAefudOfach5PAiQwawJwk/+tci2Gy/Deo7e0Oy1u5guU5wc2my0qVv65q236jRq425qHc75OOJuHANaHaHjQNJONp12XqW4k7bJGf+RJmq8y1UMiXw/RZtre+dLTWg+Y+XqgNFVfLoRRIxhtDWad/sYePvc1LmYyzu16Y17a+8ff+LYfGyp/+7qRTVDhP/wVX/PHnyw+Rw6HuRRf+tn7eCbMZyI7F72tx9rbMBsdYISZy4NUuw5Ttc0ua/9b9+sUY8TeP5Cf6biZSr+pvrPPx6lkp/rOri/CUaDMV/2+1qKp9LF+Z+fr79o51TnsdaB+X5/Nps0zMRhaZY2+vq6D0GMm9dpZ2vvTtDXQ/oT8XK8Vdh2go6/P7Loj3MlUxV8d/vS1r8m+jvc156fSYSbfTcU80DXO11gIhs5T6eqrzTPRIVBZ+7yETv761pdO+Mxehz+5qXTyNy/tdfkaT3aZqf6GroHqO1U9c/nO1/nBZq5tg06+GPs631z0D+djp9p7+Gr3fPSZvd6Zzj/78bN5aWWmYwQh3tq7B/SYLVNh7opUyFgYRm6ofCZ43oB8afUtXV7v2QHleYIfE4XCrjP/JgESIIGpCMztbn+qmvkdCZBA2BDQrtrKBRXx1JEPAw+z4dIKQ8fh2kbZsipXxev0xJ7H2xuNrT1yu61bbVh6pEjFNy3KTdIxngsykvVbHSiIbYt4oig5aUiYK/qhWVFWirQXDWpZcxPjSxZ6TJT1PPSBLGKa4ljEN/bUu0TXB3fdTHUus6HzxDxPUUmHVdxkt8eogZijFXlp+jjImoIYv4jfDtm4WM+nkC1QyWTtshmpcVJdkqlv/BACDAUJSREzF5+BhymQLVcbPsRftcqCDfTF96ZAH6ODkU12u3S94F6Yo5JIqzes8RDCyKItYIWCNkK2XRms0B4tq36MLBiZm3qw0zF+lRELsiiI1Q5ZxNIFe9M/Jo40dPDKqr40zCFr6kVfm36fjSzGz7KCDM3GqoOpF95GaCdixsL7yOgAjtAfY9gui7bZZRHDHgWyGE+mXnyG8T+hXv1Wu0rors4BNhgTZjxp5qp/cSNp+h16WGXRh9AXshgHkDX9DlkcZ/od4wjF9CWY+5K1zh8ji75MVrGrUSb3u+fNfMhO6h+Mp+REPaZRL5ITGh3MOMW889TrYdORPz6HxznivGgDCmLTo2BcghUMpehLJFIEBzCHLOYKYtN7ZFVfmrmmdIEOaANi+oM55mXM+MM3jBEja50/eZn31wbIosXodzPf4cGGuYMxYmTR74Y5fltloRd0KMpOUfr26nN6+2d8/mCu6XrHx4h3rilZM4fNXLPKjoxNnMOoF3WAh5mXRl+sQ6Ze9BPmml32fv/cn++QxRyGrNEBbTKy/uqFLOKKG1kcb50/Hh0865CZPziPqdcqi37Xfam4W/sSY8TMYcNcryOqb1AgizEC5pg/KPZ+Nw9jrbKmfyBr5iXWTfQ5zgempl7oaeq1r7GQxXFmXprrmpk/eu0e73cEbwQTrPM4p1UWawtkoSN0GJExPRfK8z3jycii3+2yem0Yv17qtdA71zx5P6C7uQaCsZGFLphrKOgT9CXWBiNr2jx5jfOsh5jz6E9cB8FBX4PG1zizzoK9r/UQx0ySxbieZu009UI3r6xlP2HWWTxE8a5x4+ss+ghtt3KHDkYW3M2a7FN2/Do4Xb24npm108iCk+Fu1mSczy6LOTRZ1nMNwj4FfY+C48AcsqZ/rHsafG/WQzBZosKFYJ21
72l0v42vydY9DfQwa6d1n2LWZMhi3cLaid++9jQ4zuxpzHyDrL89jV0W48krO+5VjPFk1lm0xXoNMnsacEKZsCbrPc34tc2yp/Guh5hD4+u3nkOKFZhZ9z9GVs8hiyzWWXO9Mv1jZPVaZPaclnUW89zMH6Ovfe1E32KM2GXt6yzWHzPnjSyuV2Z/imuc0cG6N4Ss2dMYWfv+B+cya7KRNeusZqPmj9l76Gsm2Kh+N/sfIwuDJ/Y01n2KkTXrrH1Pg+sg9EWBrPWaCT3B3Lqnse5PJ/bPFHsa1T6sg1oH655Gtc3el9ADY6O6ZOKeBnMC7UWx71PMnsaMPXMNwp7GrLPoZ3BE0ddM1eco2HuY/Y+RtV6vrHsaXGPMfDfXK92X43slfI+xhIL/m76EDMY52BtZnNdcM+2ymrllTwO4qABMAAAgAElEQVRZFH0NGp8TmJemL3H9NvPSyFrnD5ibPY2RxTXI6GCfw3ZZnBvHedZNdU9l5qX6jTmMz029gcqaPY2p16oD6rfPCfSf2ctqjuPzHbJmfUK/6/sl9Rn2Uyhmb4n/Y9yYee3d54yfC/1m+sGcC7KY13ZZT12e/rXK2vUy49HaF776DX1h9pe+ZD3XD08f49zW/SXaWpzbq9tsve+DXnpttd1jIMTW3ZZevQ/KUnNqrcqz48lTMqSSovfoMHP4/BGVJwdrJ7xEPj1yQ5rU84etawr0mA7UGK07gIUESCDiCSy5p4qVwj/8wz9IYmKi/PznP494OARAAiTgiZF7WSVGQ+I3GDN2bCjW8dLf3n9FJwVHQfK8H6gkjdjI/Jffn5CrdzpkTWm2vPhYhU4M2KSSe2KzkqISEiKJW6zawLV3D+pEkYiVnKUS7CHJHkJmQba7f8jzMMsiizdEUKaSxQazrXNAJwzEQ2okBERCy0G1sbqtjDH9+gEwdFAPvNT3dllsuvBWiV0Wm6tWW72+ZBGfFOWOSsKLmzjoUKg+QzI8bNoamnt0e/EQATeFgcoiQSQ2nL7qhfcNkqeaeu2y+kZcccRNgVU2L92tk/biLZvbavNpZT4fsuhLK0fohf5B/yLhH4rRwZcsxkizkoPOKJkpCVrfqWRRr7XfEWf6bluf7ncwN2xQ72xl7f1u6oWOaJvpdzP2unz0T6CyGCPoH2u9GE94+9her112qn5HUmUYNf31OwyXKOgfyOItq3o1ds0YQVJOzAn0jV3WOn+gA2Qxdg1z1It1xM7ROi+tYwSyvuYw+tKMEdOXZq5ZdbDONWu/W+eldYyYeTmdLNYhJPtEMRzNfEe7u/uGvPPSlyxkcjMSJ/XlVLK+6rWvm+Bo2BgdoKN1/pg1dj5lMccMR2u9Vo5GB1+yuDFHUtip5qUZI9bxZDhijZ1Jv0MWSbut6zFuzs3Ys67d/mQb1Twx8x0PObC+YV6aOWGuVdY1FvVC1qyxU8na57Av2Zt3uydc16zXKutcs853w9HImuuwmWt4bGhnOdXaaV+LIItiXw+RANU6L6ZaOwORNXwMd3MdtF7bjA5GFmuGfW5iDlnX2dnKYk9j9h5Y3+xz01qvkQUnX9fBqWQz1Jps3dNYr21WHXANsq+zRhb7H+tY9yeL9RAx18HWvs7OZU9j1llzXbHON+t1xdo2f9cga7+DDfLb+Lu2+RojRtasW4Hsf4zsbPY0huNU66H92uZLdq57GowxXMMwRtCXU+2VjCyuxfb5bt97WPdggcgGuqcxY8S6/8H8AZvp+t2+T7Hvf6ba0/iSDWRPA2ao1zrXoK91/2OuQYHsf6zXq3y1L0KumkD2KViHUHzdP1j73VyvIGvqRRvmY5+CcYb7Q7P3MPXit6/12H4vCB2sstARaxauE1ZZf3ua+ZS13z9Ah+n2NP72REYvX8fb14D5ksX6Zh/T/vrY1zXMLou+mG5/CWZIeG/f95hrH8bmqcvNeu/02ANFsqwwTYe7+sUfTsgRlRz9qQdL5dXdq5VRJUlOX2uRf3zrhPYMebymVP5SfY77FBYSIAESsBLo6uqSv/7rv9Y2jU2bNk2AQwMIxwoJkIBfAnjwic3GL98/I7Uq1wfekPm7767Xm5OjF+/If/nDSbmijCNryjPl73+wQb+JgQcy2FzhpspsePyegF+QAAlENAHzBsbsMkNENDo2ngRIgARIgARIgARIgAQcSwD3ATDqw3PKrTwT8XLGtdud8qsPzktbT7+8uG2ZbK9Zqo1lv/rogso/elGH+PzBjkr98iWeN5hwWEyS7thhQMVJYF4JTGUAYQiseUXNykggvAjgjVh4bSCGMdxQYQT54mS9fvOmoiBdHltXpBuMkER4OwebljLl8cGHmeE1DtgaEggWAa4VwSLLekmABEiABEiABEiABEggdAngPgDeIfgxBZELfvatldKljB4IpwYvzWPKGwRht4eGx2SdCpUFbxF4pcFr8LryCMHzCuPNFbqtpWYkQAKLTYAGkMXuAZ6fBEKQADYTCEcCV1dsQp7eWCq3W3u1t8cXp+pl3bIsqVmRK7tUOCzE4IQbK2KdovCBZgh2KFUiARIgARIgARIgARIgARIgARIggRAmAGMIXqhEqDd4dSBc21WVFP2uCpeFnCCPq+cPCCeKUMd7jtTJJyovCPKw/ODxFVKtolXQEySEO5eqkcAiE6ABZJE7gKcngVAiADdUxGjFRuL01WZt+Ni6tlA2VOWqpOa98nrPRelQxhHE3lxbkeNN+MmNRij1InUhARIgARIgARIgARIgARIgARIgAecRwAuViESBgpBXG6vzVO7OQUmId+n/Ix/p/hP18sevr+kXNBtVzsctq/J0OO4ovo7pvA6nxiSwQARoAFkg0DwNCTiBAJJHHlcupu9+dUVuNamk4YMjkuSO0d4eO1X8TfyN8sCyHJ2AmIYPJ/QqdSQBEiABEiABEiABEiABEiABEiABZxFAiO215Vk6HNbI6D2Jj4uWQ2cb5a0vL2vjR3JijDyyukgqSzJ0OG4WEiABEvBHgAYQf2T4OQlEKIHMtHjJy3BrA8gp5QWS8LlL0pLitaspko2hJKs4mzR+ROgAYbNJgARIgARIgARIgARIgARIgARIYAEI4LkDEp6jIFT32bo2HZEiNiZKtq4slBcfq9CRKQZUQvWrtzskXcnieQafVyxA5/AUJOAgAjSAOKizqCoJBIvAmIqxOTQypr06ENrqBzs8ZzqivEFOXG7Sic9h/DAbj2DpwXpJgARIgARIgARIgARIgARIgARIgARIwE4gLtalwl3lSmtHv/4KuT+WFaZJ/9CIDou17+QtKcpOkZd2rZDc9ET74fybBEggggnQABLBnc+mkwAIjKiwVxdvtcuJS02yWrmXVqnkYRurc7xw2tVbFkUqEbpLGUdYSIAESIAESIAESGA6AnixYnh4SEZHRyU+PmHB38LE+QcG+mVkxBO6czp98X2iW+11GD4jEFSUIQESIAESIIFFIYAk6XhhMys1UT/HQJSK7r4h+fzELfn9F56wWEtzOmV1aYakqlymkGchARIgARCgAYTjgAQinACSnr//1XX59HidNn58a3OZbF1
ToI0gyW6XDA2PqIRiGeKOj4lwUmw+CZAACZAACZBAIARgfPjgTx9Ke0ezvPTSjyUlJSWQw+ZFBsaPW7duyscffyjnz1+cts62zg4pKcyTP/uzn8my5ZULbqyZVkEKkAAJkAAJkAAJeAnAqFFekKr/RkisPUfq5Pf7LukQ3sgJsrI4S3Kz3Dq6BQsJkAAJGAI0gHAskEAEE8BDgiblPnrldpt6c2JYEPLqTluv1Lf0yLMPlcnqskyJWrKEDwMieIyw6SRAAiRAAiQwGwKJiQnS0xM/m0PndAyMH7/5zW/k7NmzUrF8jcRkpvusb3C4T64fOyqffrpPioqKZcuWR6R82QqJkiU+5fkhCZAACZAACZBAaBFo7RqQc1fb5K56qXNpTpI8ubFUdm0oVp4hyTJ2T4X5HhqTOGUw4ZU9tPqN2pDAYhCgAWQxqPOcJBAiBJAYrEwlDPvRzipJT66T8zfa9JsTH6u3KCryUyVHxc20Jw+7p3TnBiJEOpBqkAAJkAAJkEAIEkhITJRtj2xTYbCGJSkpeUE0NJ4fb7zxuly+dlV2ff/PZd3KdSpJ6uTbHXi3njp/Ss5+86U2fuTnpmtdWUiABEiABEiABJxDIDcjUeUEydMK11Rmy7a1Req5Rpy0dw+qhOjt0tM7LJtX5zOahXO6lJqSQNAITL4jCNqpWDEJkEAoERgYGtXqpCTFyaPri2TZ0nQ5eP62fHmqQTKSEqRIvTUB7w9rwcMFvEkRrWJk0wgSSr1JXUiABEiABEggdAhgj7CQYa/QcuP5AePHtmdfkq0bNoorOcknlAvK82Pv7/9FYqPy5YHdK2Xg8nWfcvyQBEiABEiABEggdAkgTDeeZdRU5UqqO04reqWhQ744Wa9f6kxTzzqS3DHyYGXepBc7Q7dV1IwESCAYBGgACQZV1kkCIU4ACcNqb7bJjcZOyVfxMUvyUqVA/X7+4WXywDJPAnTE1bR7f4R4s6geCZAACZAACZBAhBGwe35MZfzoHx6VC6dPyLuv/UKGR9Jk6/M7pLutVWppAImwUcPmkgAJkAAJhAsBGEHwg3wgp1U4rPcOXJYTl5t0iO9+9dJn3Z0uqVmRyxCX4dLhbAcJzJIADSCzBMfDSMDJBHr7h2XPwRs68XluhlvWlGbLxqoc7QWiDR/M++Hk7qXuJEACJEACJBAxBBobbwvCXp2rrZ3e88Nm/CjIK5KLygDCQgIkQAIkQAIk4FwCCNONfCAwfuxXES1iY6JkTXmmbKrOl1VlWZMiWzi3pdScBEhgtgRoAJktOR5HAg4mMKDegEyI90z/K/UdcvNul3x5tl5eeLhCfvxEFWNkOrhvqToJkAAJkAAJRAqBGzduBGT8sHp+gA08P2D8YCEBEiABEiABEnA+AYTeRD6Q5UXpujH4vbE6T/1Ok4RYz3MP5jJ1fj+zBSQwFwI0gMyFHo8lAYcSyEyJl6c3l0hGSpwcOt+ow2ENDo3o1kRHRTm0VVSbBEiABEiABEggUgg0NDQEZPwADxP2qqc3XnZ8/xkaPyJlkLCdJEACJEACEUMAho5nHyqTnv4iKcxO0oYP5C+909YrTR39kpOWoMJ++84NFjGQ2FASiGACNIBEcOez6ZFJALGyEeJqWWGaFOemKLfQPDl84Y60dQ3K1jUFEuuiASQyRwZbTQIkQAIkQALOIBCo5wdac2Lffnn3zV/qnB87vk/PD2f0MLUkARIgARIggZkRQP7SfGXgGFX5Tpva++R4Q4t60bNFLte3S3v3gDz3ULnsVhEvmOd0ZlwpTQLhQoAGkHDpSbaDBAIgALfPKw0dcrdtQCU+T5LM1HivIQR5QVKTYrkhCIAjRUiABEiABEiABBaeAF7iQM6PP/zhDzrnx66dz8uGDRvFlTz5jU5v2Ctl/OhKFNm1ncaPhe8xnpEESIAESIAEFo4AQmEhF8hvPqqVM3XNOtR378CwxERHy7KCDNm2dlA/A2EhARKIPAI0gERen7PFEUygT138vzhZLx8fqZM8lfx8eVGGrF+eKyuKUyU7NYHGjwgeG2w6CZAACZAACYQ6AU/C8zfk1NkzOuG5P+PHkr4+uXDhvLz72i+05weNH6Hes9SPBEiABEiABOaPQGt3nzZ+oORnJsnKkgzZWJUj8XHR83cS1kQCJOAoAjSAOKq7qCwJzJ4AvD9aOwek/m6PXGvs1D+nrjbL4dpG2VSVLz97ulp5gMTN/gQ8kgRIgARIgARIgASCQMB4frzxhsf4MZXnB05/zGL8YMLzIHQIqyQBEiABEiCBECWQlBAjD68u0NoVZafoFz5N9At3fEyIak21SIAEgk2ABpBgE2b9JBAiBOAO6opeIsuL02RrT4FOBtY/NCodPYMhoiHVIAESIAESIAESIIHJBAL1/PCGvRr3/KDxYzJLfkICJEACJEAC4UwAyc+3r18qG1WuUxg8khNjdTL0weFRHQ4L3zMPSDiPALaNBHwToAHENxd+SgJhSSAnPVF2by2XnTVLpamjXxqbe6RVJQRbsTRN3OpNCRYSIAESIAESIAESCBUCxvPjnXfe1WGvpvP8uHD6hDfsFY0fodKL1IMESIAESIAEFo4AjBsweqB09g5JfUuP3G3pldvqBdDM5HjZtDKfeUAWrjt4JhIIGQI0gIRMV1AREgguAYTAwlsPKMj/kZ+VJKvLMmVk9J72DImOjgquAqydBEiABEiABEiABGZAoLnprpiwV1Pl/LB6fqB6Gj9mAJmiJEACJEACJBBmBIZGxuTAmduy7+Qtua5Cf5uoF+uX50hFURoNIGHW32wOCQRCgAaQQChRhgTCgMCgCnd1/GKL1N5skZy0BElOiJUkd4wyhiRJQZZbECKLhQRIgARIgARIgARCgUBDQ4ME6vlR+9XX8u6bv9QJz2n8CIXeow4kQAIkQAIksHgEBodGdKSLE5ebpE39RomJjpZ29f++wZHFU4xnJgESWDQCNIAsGnqemAQWlgA2AQfONMifDl3VJ05xx0maSnqOBOg/eaKKb0EsbHfwbCRAAiRAAiRAAn4I3L1zx+v5ocNebXtUXMlJk6S9nh/K+IFC48ckRPyABEiABEiABCKOQJzK81GSq16KWFnobXuiygdSmOOWDBUGi4UESCDyCNAAEnl9zhZHKIGB8fBXaP7Q8Ji0qBwgXb2DUpDplpGxsQilwmaTAAmQAAmQAAmEEgGr58fa7U/L2i3bfBo/oDM9P0Kp56gLCZAACZAACYQGgVhXlKytyJCKwmQd6js+JlpgFEHo76gljH0RGr1ELUhgYQnQALKwvHk2Elg0AkkqyfnGqhxJiJ847ZcXpuoNAQsJkAAJkAAJkAAJLCYBeH4g7NXhY0cExo+nH33cp/GDnh+L2Us8NwmQAAmQAAmENgGTCN2tnoEg5ykLCZAACdAAwjFAAhFCIEG98bB1baFsqMqd0GK8CYE3JFhIgARIgARIgARIYLEIwPjx5h/e8ho/dm3d6dP4Af3o+bFYvcTzkgAJkAAJkEDoExgZHZOz11tlz8Eb0jcw7FU4U+VCfW5rmZQXpIZ+I6ghCZDAvBKgAW
RecbIyEghtAqM+Ql15PqMBJLR7jtqRAAmQAAmQQHgSGBu7J81NdycYP+j5EZ59zVaRAAmQAAmQwEIQGFThv280dsqnx+u8SdBx3vL8VFm/PJcGkIXoBJ6DBEKMAA0gIdYhVIcEgkWgu29I9hypk6O1d6V/cESfJiHOpWJjZsvureWSqhKis5AACZAACZAACZDAQhKwGz+m8vy4cPqEvPvaL2R4RCU2fX6HFOQVLaSqPBcJkAAJkAAJkIADCERHRakoF9GSm+HWuT9MydN/O6ABVJEESGDeCdAAMu9IWSEJhCYBJEG/3tAt+081yPDoqFYyJtqT++OJjSVCJ9DQ7DdqRQIkQAIkQALhSMB4fvzxvXcmhL1KSE+Z1Fxvzg9l/ECh8WMSIn5AAiRAAiRAAiQwTgAhvmtU6O/MtHgZHLqPJdntkrK8yfsMgiMBEgh/AjSAhH8fs4UkoAm41FsQGalxsqwobQKRouwU/V0gZUnUEhGVREz9y0ICJEACJEACJEACsybgy/PDl/EDJ6Dnx6wx80ASIAESIAESiDgCSIKem54oOerHXvgsw06Ef5NAZBCgASQy+pmtJAEV4ipWntpUKhur82RoeERiYzzTPyUxVn8XSMFmITo6MGNJIPVRhgRIgARIgARIIPIIIOF5IJ4fS/r65NiF8zrsFQo9PyJvrLDFJEACJEACJDBTAtrLtLNfbtzpnHAonoHAA4Thv2dKlPIk4HwCNIA4vw/ZAhIIiIBLGS6W5iZLUXbSBHl4dczkLYiZyAakGIVIgARIgARIgAQihkBra6s2fnz1zSFZu/1pQc4Pf54fxvjBnB8RMzzYUBIgARIgARKYM4GhkTE5rnKfvvbhWenoGdT1IRdIfkai/NXuNbKpOn/O52AFJEACziJAA4iz+ovaksCsCfQODEvdnS652dg1oQ7ExVxbkSPxsZ58ILM+AQ8kARIgARIgARIggSkIGM+P6YwfzPkxBUR+RQIkQAIkQAIkMC2BoZFRbfzo7hvWsvidlhQ37XEUIAESCE8CNICEZ7+yVSQwiUBP/7DsPXpL3jtwxfsd3oLYsCxXSvJSlQFkcnzMSZXwAxIgARIgARIgARKYBYGZeH6YnB89vfGy4/vPSEFe0SzOyENIgARIgARIgAQikYAreolUl2bKz55eJf2DI14ECP2dlzExIkYk8mGbSSASCdAAEom9zjZHLIH+gREZHB6N2Paz4SRAAiRgCIyMjun/Ri1ZIkiUyEICJBA8AoF6fkCDE/v2y7tv/lIQ9mrH93fQ+BG8bmHNJEACJEACJBCWBBD+e/nSdFlWmDapfQgBzkICJBB5BGgAibw+Z4sjlECqO06e2lIiVSUTNwH5WW5JSoiJUCpsNgmQQCQRQELEnp5uaWlqkmvX63TT19esl8zMzEjCwLaSwIISCNTzwxv2Shk/upRT6q7tNH4saEfxZCRAAiRAAiQQRgRG1ctO9pc/o6OiJNYVJTSChFFHsykkECABGkACBEUxEnA6AeT4WFuepX/shW8/24nwbxIggXAj0NXVJVevXpNPPtkj+/bulca77VJVVSH/Mf8/Snp6Br1Awq3D2Z6QIADjx1tv/X7ahOdL+vrkwoXz8u5rv9CeHzR+hET3UQkSIAESIAEScCQBeHofv3RX9h1rkD6VC9WUzLQEeXpziaxQ3iEsJEACkUWABpDI6m+2NoIJ4M3n7r4hGbCFwIqPiZbkxFg+/IvgscGmk0C4EzDhd9568y05cuyUtLY2iSvaJfm56TI8fD8ucLhzYPtIYKEIYM/R3t4m7/3xvWmNH9rzw2L82Pq8szw/8JCls6Ndo01KSpK4OCZYXahxxvOQAAmQAAmQgC8CI6P3pLGlVz49XudNgg65pTlJsmVVrq9D+BkJkECYE6ABJMw7mM0jAUMAxo89R+rkaO3dCVDWVmTL7q3lkprEG3aOFhIggfAigIewjY235Z//+Z/l17/6rVy/flkSE92ybt2D2vtj167tkpOTHV6NZmtIIAQIINQcPD/+5de/kWUPbJAUd6pcuHbJp2a379TL5+/8VtyxCeI04wca1NrcJP/4j/+PbtvatWptqa6U4uJiSUlJ8dlefkgCJEACJEACJBB8AskJsVKalyL9Q/dzoBZkuiU2ho9Bg0+fZyCB0CPAmR96fUKNSCAoBOD5cb2hW/afavDWHxsTpf//xMYSSQ3KWVkpCZAACSwOARg/bt26Kb/8p/+uDCCvS31DnRQVlsrzzz8tL774XaleWS3JycmS6E6aVw84hNoaHh7WeUXMW/A9PT0yqj5jCT8CcQkJkpWVxbf+bV3b3t4ue/d+Lr3dQ9LWcEM++t07EuPq0OGtrL9xWENDjz56+1+96MiE5wODg7L38/1y9tQ5ZfRIl9Wrlst3vvcd2bXjcSlcupRjI/ymPVtEAiRAAiQQ4gSQ52NDVa4sU6Gu4KlpCsKCZ6QkhLj2VI8ESCAYBGgACQZV1kkCIUgAoa4Kc9yyUW0ErGV5Ubq4VDIwFhIgARIIJwLNTXflN7/5jfzX//aaCk/TJsuXVctf/tXP5KWXfiiFRUtVCKzgrHvIM9Ld3S2VK5bLxUuX5cyZM3L7jic8TjjxZVs8BBIT4qW8LF821NRI+bIVQRtXTuSdmZEuL//8f5WVyzdJnNqD+Crd/UNy8OA7cvvmTe0l4sTicrkkIztLYmLj5I7yZsHP2XOXdeivV//8L2TzZtV+hsVyYtdSZxIgARIgAYcSQI5TRLhglAuHdiDVJoEgEKABJAhQWSUJhCIB5PlAqKudNUsnqBenXEDTkxn+KhT7jDqRAAnMjkCfSqj84Ycf6bBXyPcB48d/+Pu/le999zuSm5c3u0oDPAohtWAA+eN778jXX1+T7Ow8yc1dKQkq9BZL+BFobr4t+z6/IEePnZMXlHcRH3bf7+PYuHht1Mgv8x9rO7FrUNISM+S23HTs4IAn2V/+9C9kx7bH5NTZM/LNl4d0uL1//+2/S5syvv6Hv/k72frwVhV+L9GxbaTiJEACJEACJOAkAvDCvtPWK1dVBAxriYsVqShIl8zUeCc1h7qSAAnMAwEaQOYBIqsgAScQ4FsQTugl6kgCJDBXArjhOXr0mLzxuifnB8JewfPjhz98SYelCnZxRUdrr49Tpxtl27bdUl1dIzExvMkKNvfFqn9sbETuNjXKoYMfyLt//Eji4+OkZsOD9ARZrA5ZhPMmJSXLs7t3y+jIsLS0tMgnH3+i15/Pv/haPv3gUxnp7Vda/W/yyKOP0BNkEfqHpyQBEiABEog8AkMjY3LqcrP8295a6egZ9AIoUzlBXn1uDQ0gkTck2GISEBpAOAhIIEIIDKjkX7U32+RGY+eEFudnuWVtRY4gHiYLCZAACTidAPJ+4OHjgW8O64TnyPnxve98d0GMHzC+nDlzVo4cqdPGj7Vrt0q0MoiwhB+Be/fuyZIlS3T/wsgW++i35bO9r8vHH38sFRUVCzLewo+qM1uEF0yiZIkyesVJYWGh/OClH0hRUZG43L+Q/Z9+oQ0h+H9Kaoo8+ODGec055Exi1JoES
IAESIAEgk9gaGRUGz9aOvAigqjk51HSrv4eGh4J/sl5BhIggZAjQANIyHUJFSKB4BDo7B2U/Scb5OOjdRNO8MjqIinJS1UGEIZmCA551koCJLBQBBD6at9n++SP6k38ru5OefrJJ+SlH/5YisvKF0SF9vY2OX3mtA57Bc8Plyta1HNyljAkAOOHteRkF8iaNY/IV1//Ua5evSrp6Rl80B2G/R5IkxDqCt4eseN5P+AFAkPI70rKtYEEPywkQAIkQAIkQALBI+CKXiLVpZny06eqlcHjfhL05IQYKcpODt6JWTMJkEDIEqABJGS7hoqRwPwT6B8YkcEh9TM8qitHUtK+geH5PxFrJAESIIFFIHDn9m35w3tv6yTEeCv/O9/7jnrjesOChSNqbWmW1vZ+nfMDYa8i3fhhsxGENQ+0NTUtW1JT0pUB5IoOgwWvAJbIJICk51sfflj+RuUDamtukcPKI+1P7++RlSsr5aWXfiwpKSmRCYatJgESIAESIIEFIOCKjpJlhWlSXpAqI6P31L3AEu/vKPsGdQH04SlIgAQWnwANIIvfB9SABBaEQKo7Tp7aUiJVJWkTzocQWPiOhQRIgAScTADeH/u//EpOHj6tm/Hwts3y5JPPLGjiYeNSP5eE5ya00kL1xXyfD/eUCAU2MjIko6MTQwzAKBTOIcFiY2O14WtwYGihuoXlIqoAACAASURBVI/nCWECePiy7ZFt8uO6n0jznXadGP1ffv0bqaxcpY0j+J6FBEiABEiABEhg/gnAARt5QPDyJ4rJAoLfbuUFwpdU5p85aySBUCdAA0io9xD1I4F5IoAcH2vLs2R12eQkwLwJnyfIrIYESGBRCOCBe73K/WG8P8rKlssLLzwveXl5i6LPTE4Kg8HQ0KC0d7RKR0ezPjQ/r1iSktJ0jolglNHRUensapPOzjYZHh6QrKwC5bmQMWfjhKm3peW2NNRfk7t3r8vg4JBK/Bwr7qQM5ZVTIdk5SyUrM1slC3cHo2mLWqfLFbOo5+fJQ48APD2+/e0X5MqVy/LaL2/L2VPn5O2335LKFcsl1wHrU+gRpUYkQAIkQAIkMD2B0dExOX21SfYda5gQ8SIzLUGe3lwiK5amT18JJUiABMKKAA0gYdWdbAwJ+CeAB4TdfUPSawt5FRfjkvTkOMYq94+O35AACYQ4gYGBfjlw4KDX+6Nmw1rZuvVh9eA9tL3b4H3R3d0h584fkc8+e0uuXjkmycm58sMf/c+yoeYx5U0w//oPDPSqEE3n1fl+LydP7lPnSJZnn/upPPH491XOEvesw1R1d7erh7xn5auv3tP1tra0SP9AnyTE388vlZCQICWla5R3zjOyfv12ZehZOmejS6gMzUgPdxYq/RCKeuTm5cuPfvQjOXz0sBz46oDs2/eVPPutZyUzO4deIKHYYdSJBEiABEjA8QQQ9qqxpVc+PV6nnoHcD/m9rChNtqzKdXz72AASIIGZE6ABZObMeAQJOJIAjB97jtTJ0dq7E/RfW5Etu7eWS2rS/D9ocyQoKk0CJOA4At1dXfLRno907g+neH8MDw9KfUOdHDr4J/n88/dVeJxz0tnRprxWiqRLeWbMd8H57jY1yokTn6tcBL/2ni81LUN6e+AJMiQJCbPzyoDx4/Mv3tX1Xrp4XGJi46SoqFgbc1LTslRIqH7t3XKn8bocPrRHai8cll2Pn5fndr8qxUuXhYURJEjOOvM9DFjfIhCAl21VVZW8sPt5uXjhilysPa+8QP4ga9asphfIIvQHT0kCJEACJBAZBJITYqWqOEPaezwBsBJURIyCjGSJVS+AspAACUQeAc78yOtztjhCCQyoxOfXG7rl4PlGGRoe0xRiYzzxp5/YWCKpEcqFzSYBEnA2gRHl4n7x0mWprb2qkhuOSOWa5bJ546aQ9v5AqKjrdbXy5pv/TQ4f3KMMDwnKcLNKGyWCUeBpAuPH22/9Vznw9fv6FDgfDBLw0phtQb2Dg33yzTd75J23/1E/2K2sWimbNj8t2x5+XvILSr1VI9zW2bPfyN5P35TTp75Sv38vscpD58UX/0Zysgt8qmA3KszGy8Jax2yO96mYjw+DWbeP0836I+TKaW5uVt4+LhX6LCuk58msGxmCByYlJcsTTzwl7773R+0F8vWBo3Ls2HF58umn6QUSgv1FlUiABEiABJxNIE4ZOzZU5UqpSoJuLXgpITM13tmNo/YkQAKzIkADyKyw8SAScB6B+JhoKStMli0r86V/0JMMLCHOJcuL0gXfsZAACZCAEwkMDQ7IwYMHpL7+pmRm5sjmmo2SlZMT0k0ZGxuRuusX5OaNS1JVvUl27nxRJWkckN/+6/8t/f39PnWHsaG9o0WQXwPFX54QhLhqVJ4wyO2RlpYtmRkeN//6+kty7txR9cZ5ufK++J7ExcbLG2/8Z81tLsUTTuttr/Hj2y/+T7L9sRdU8vkU7dlhDBDpyhMkX3m3IA/IG2+4tSfIieNfy6aNT2odrcnRTRvu3r3h9YZBOLA0VUdhUcUkeRiUWtvuai+TxMRUyc3J1zlVGuqvqs9aFItBHU6ssKhce5wg/wg+M3lXcIy/vCTg3tPTIS2tTd58KWiLPT+L3VgzF6bBPBbGjzfeeF0aG+/IurXrpLyiQuejYDimYFIXHWa0oqJ8ghfIJ59+opOkI08ICwmQAAmQAAmQwPwRQBY9RLhglIv5Y8qaSMDpBGgAcXoPUn8SCJCAOyFGdtWUKAPIxDdt8XYEvmMhARIgAScSwAPdL5UBBOGjEP5qy+YtkuhOCvmmZGcX6twblZXr9UP5b775eEqdx8bGtPFjz0f/Jg0NN2Tjxsdkx87vS0Z6tvc4PNRHPQhHhfLMMz+W9LRM9fDVJRnKyPDEE9+T8vI1Ul1dIxcuqFBVKv/HXAq8P86c+VKH1XInJcmjj35XHn74WRX66n5iSeMZAYMBDA8497dffFWys/OVB0KeOu7+w18YG5pbGuXQoY/l668+lBt1ZyYYhPLyy2TVqgdl27bduh6TSB3GHnNMWfkKWbfuETly+FNt8OnsuCNt7a06F0mp8nr51rf+TB566CnV9lg5eWK/yoXyttZl9/OvSEX56knhuNBG5Gj58MPfKm+JeHnm6Z9I6tqtk+Sc4gGCcGTHj5+Qd995T/dZZfUy2fTgJlm5slKNxVVSUlIsSerz1LR0iVJ9hgf3LPNDAF4g27fv8HqBICdIbW2tPPjgRnKeH8SshQRIgARIgAQ0AeQ/vdOm8t6pCBjDw/dzgCS5Y6SiIJ1eIBwnJBCBBGgAicBOZ5MjkwDcPZHsHD/2wgccdiL8mwRIwAkEEP7q3NlzcuuaxysC4a+qV1aHfEgZlytWP8A3D/HhwTBdgYdEVlaBDhuFHBrNzdeUR8OyCcnSEVYLxo+D33woWx56Rhs9YPyIioqSstIqbWiJi0uc5L0w3bn9fd/S2iwXak+rHCI9snrNZlm79mFJTcnwJ64/h9HigXUPy4oV6yUxIXGCPvBw+Wzv6+rh/D9pwwe8Y2DQiI9zK+PPHTl//mt5//1faQPQSy/9
raxevUliVb4R5C+pu35eG2LA5fq1SwIvEhhLUpQ+XV1tOjE7Qm8NqJBfKakZWgcUsKy9IFJaumI8H8n9PCjw6ujq7lTGmA/kyy/ekbXrtqnzxU8yfqAep3iA6EarMqAMO/g58FWTHP7msDJ4ZGhjyLLlKyQjNU17h6xctVJ5LVRIerqnT7lXMPRm99vjBVIhu7Y/KmdPnZOb12/LkSNHlcFuHUORzQ4pjyIBEiABEiABnwSGRsaktq5NXvvonHSM5wCBYFleirz63BoaQHxS44ckEN4EaAAJ7/5l60jAS2BgaFRqb7bJjcbOCVTys9yyqixL3PH0AuFwIQEScBaB0ZFhOX7iuA7jhAe4O7Y9prwP5ubVsBAEjDfETM8F48KOHd/TD/jxsP+zz96S3NwSKS1ZocM0fXPgQzl14gudgBxhqKwJxhECCj/zVeCt0alCTt294wmhVax0QHgqaygrf+eCEcR4bxgZeK9cunRCPvnkd9pj48knf6S9MtCGmJh46VRGjKNH9uqwXWh7YWGJPl9ujserEYah4aFBxeayTr7+wguvqjfrt+tQXH19XcowtFKHGEOelYu1x2XVyo3KI6Rae4XAMFJXd0kbO5AI3nhzjIyMSnNTg1y+fFondq+oWDkhr4m1fU7xAPHVJ8id06pCfB3+pk3np4hXRrKsrBxZvWq5DpH1wPp16iF9jTaGwDsE3jM0hvgiOf1n4LdhwxZlmHtd7qhQdYePHZFvf/sFNZ4Lpz+YEiRAAiRAAiRAAgET6O4f0saPrl5PEnQciIToQ8OecOABV0RBEiCBsCBAA0hYdCMbQQLTE+jtH5b9Jxvk46N1E4QfWV0kJXmpNIBMj5ASJEACIUZgcHBQLl+7qh/eLl9WLWvXrHVE+Cs7RuQECaTAuABPjqee+qH2dEAC9coVayVVeTTAeGDCaD3xxI/0w3+7kSGQcwQqg5Bc3T3tOsRUbGyMFKqE5wnxCYEePkEO3hPdPV1y6tSX2pgFA87D2741ISRVZka2PLhxl0oef04ZOX6hw1shx4c9gToMFY888i0V5upJrzEDIblWr35ISkrXyN27e6Thdp1K/t6v86hUr1yvwoEd0UYOe30IrXVNGUyQLD4jPVPXkWwJ2WVtxFw9QDCW8RPs0ts3IL1DvvPMwBCCAs+Q+oY6/eOK3id5KndLcVmBPKDm18aNm2XdAw9IcfFSGkNm0VlxylBXXV2pjUswgJw6flYunL9AA8gsWPIQEiABEiABEvBHwBW9RKpLM+WnT1Urg8eYVyxZhf4uyg79l6X8tYufkwAJzJ4ADSCzZ8cjScBRBEbUwyqUwaGJD9r6Bu7HxHRUg6gsCZBARBNAbN+bN29KU0OD5pCdl64eyhaFfPgrX52GMFWBFnhxrHtgm2r7d+Xtt/5f+fLLD1RYrESVi+OgflC/actTsnnLs+rhdFqgVc5KDkab/r5efSyMDnFxsdozYLals7NNe7bAi6No6SplBFkhLle01xsDHhYImVVWukq9PZ+u21rfcFWHwbIWGE+Q5wReI1avDLc7WeLGDTRdne36EHiHrFheo40bqO/atTO6PuMpA4+QSxdPKmNJnw7HVVq2Uunku41z8QDBWD506LC8+8ePpKXNE85tthynO65N5cq5eObydGLe72EUwYN6/Bw/clL+qHT0ZwzBw32W6Qnk5RfIjl275IAKPQaPpdNnTsv2nTsduXZN31pKkAAJkAAJkMDCE0D472WFaVKcez/XnNEi1hW18ArxjCRAAotOIPA77kVXlQqQAAnMhUCqO04efaBQSnInJgdGCCx8x0ICJEACTiIwpp44X71yVRrvtuuQPXg73a2SDDuxBOoBYtqGUFiPPvai8k6o1SGvfvuv13TODCQJ37nzO1JUWDpveT4WgieMB329XdKhQmqhZGXmKOPE/VwcRgcYNZC/IyEhQYfJamu9I6PjXguQgSEGid2RWB15T+wFScxNGRoa0gYWGDVWVG3SOT4QBqu9o1V7lSAvCzxC4BmCBOrV1eu1Xghf5qv4+diX6KTPMJYvXjwn7737tgrRddmvkWXSgbP8AB4eMynGMwS/jWcIjCGffPKFIO/O07uekueee1aWLi1maKwAwCIZ+upVa1T4ugK5fOWCWscuS2tzk+Tm5QVwNEVIgARIgARIgASmI6C2loI8IIiCYS3wDHFFx0iU+N7PTVcvvycBEnAuARpAnNt31JwEZkQgLjZaVpdlSlXxxAS12AREqzckWEiABEjASQSQ/+OqCn+FkEkIwVSxbLkj8n/4YjwTDxAcj1BYMHIgz8elWvUWuXpojhwoCPuE3BbzmevDl774DDonjBsp4LUxODikk5HPJuwWjA1DQwPq+G5txEDi8hiXa4IHh+ecURIXm6DCnLlVKKvbKlRTrz7nXAqMGtVVa3XieBMGKzsrX9Wrwl8pjxB4hmRmZUllVY32GPFX5uIBgjqTlNGmorzIX/Xz+jnYwbtlNsUV7dJjDZ42VVUVsmnTJtm6datkZ2fT+BEgUORPKS4p1v2NuXvt6lW5obzZsnNyyTBAhhQjARIgARIggakIjI6OyemrTbLvWINYI15kpiXI05tLZMXS9KkO53ckQAJhSIAGkDDsVDaJBHwRwCags2dIem0hr5D8PDUplqEXfEHjZyRAAiFLAPkSbt26pXJQtOkcBVWVVY7M/wHAM/UACYVOgREGxojUtDzVDzd0Xo3Ork5lhArshtJ4TMBwAG+NQT95KYLdVhg1SstW6wf6JgxWdXWNNhBcvHRah7+qWLZBh+SaKsH7XDxAotTBO3fsVEnGl8nAQHDzgDQ03JJf/8u/yEcffxIwWrvRo6ZmvWzZslVK1EP89PR0Pe8QaoIlcALgVrVylXy6d58y5NZL4+3GwA+mJAmQAAmQAAmQwJQERkbvSWNLr3x5tl5aOjy5z2JjonRIrC2rcqc8ll+SAAmEJwEaQMKzX9kqEphEAO6fe4/fkKO1dyd8t7YiW3ZvLVdGEIbBmgSNH5AACYQsgfb2dmlsbBSE5cEb+vkFBY59CDtTD5B7ympwva5WPv/iXR36qqxsuQ4JhSToxcWVsk0lEF8IL5DsnKUqbE+xnD1zSG7euCTNTbdUYvGlUxoKMKCGhwflbpPngW9uTr4O+QRjiv5OeZPAswMFRgWrZwUSr1sNJfFxbp13ZC5eIAiDBeMGjBz19W/rMFgtrc26LVevHNPhr5BoPiszW+vkr8zFAwQeAXj7PzM7Z0L1MIwgPNZ8/UbltRfOS2r6RE9Qf23KVN4xxtMDRo+a9RtU0vhqbfSIVeHEaPTwR276z7PUmrVMea0h1Bs8cuDNhnHMPCrTs6MECZAACZAACQRCICPFLWV5KZKfkegVL8hQoVITfOdzC6ROypAACTiXAA0gzu07ak4CMyIwMDwq1xu65eD5Rhka9iREx1sQKE9sLJHUGdVGYRIgARJYXAJtbe3S2d6mHsK6JD83XdyJ9/M7LK5mMz/7TD1A2jta5JsDH+r8H6Vlq+SRR76lvRUOH9yjjSKFReUqvM7qaQ0RM9d04hG
pKane8FHXr5+T06e/lpLSKpVU3L+xAOGuLildP/ro33RlO3d+VyceR96O5GS8kafyISgDBLxJkOzcWsCpSyVLb21p0d8hbFS06v+5GEBguIBxA0YO8EMYrPr6Syr/xxV9HuRVqapCWLGpx9dcPEDQRhhBfMWjNp/N1+/YGN9bf+PlgYTw2XnpsrRwqdDoMWH4zesfMHSUlZbqcYwE8/Bm61BGXeYBmVfMrIwESIAESCBCCSDR+dqKDCnM3qBelvI8+wCK+FiXZKZOvaeLUGRsNgmEPQHfd0Fh32w2kAQij0B8TLSUFSbLlpX5ExoPDxB8x0ICJEACTiLQ3d0tXX292nsAb7THxXs8CJzUBuiKB+f+PEDsHhCQh/fEqZNfyf79f9DJwGH82LHz+8rosUx7YSCXRXlZlWRlFfg1RMz1Yb1hjHBXlZUbVYLwjXL0yD6tU35+qcpF8pQOhWU/z8jIqNy8dUUbPz7++HdKv0xZs2aLri4tLVsl0V4mp099JU1361Vos2ad58QU1DWgwlE1NtbpsFTwTECb0f8qhbpXbjb/gXGjvHyNNnYgDNbF2uM6wTzOs2rVg8q7qHRaY9JcPEBmo/N8HZOSnCpl5ct1Po/Vq1erUHLVqi+UYUt5esBLAR42MM6wzD+BrOwslQx9uTaAXLlxTZqVwY0GkPnnzBpJgARIgAQijwD2LohwkeIjygV3NZE3HthiEgABGkA4DkggQgi4E2JkV02JPFiZN6HF+Bw/LCRAAiTgJALtbW3S2+1JgJ2fny/x6o1qpxQTAqqvz5OIenhoSIXBua7DWeGhe2NjnfaSiIn1uOjDOJCelqkNJQh9tWfP6zr5+2PbX5TNW76lvsvSyc+3b39OP8A3hohHH92tk5LjfO0drdKhjAqm1Dcg5E63DjnV3HxHrlw5q70wUBITUwWhqQIJo7Vs2WrZ9fj35O6da8pwcF7eeOM/S1vbbanZsEuFwypW5/eEHcD5r107K5999pb2tEDZ+vBzsnLVFm3EwIN4GEOOHvlY6pQ3ydGje7XhIVUlREeB9wd0PHHiK/23ycsxHw/nkdujsKhCli9fq5JSn9OhxDo77mgDTWVljSQrLr6MUVqR8WI39li/C7X/xyUmyLp1D2qjB7w8KldUyZrVqyRPhZFDaCuE3JoPrqHW7lDTJysjQ3IKC/X4b2tuUfOmPdRUpD4kQAIkQAIk4EgCY2P35E5br9TWtcmgioSBEqde+oyJiZGVpen0AnFkr1JpEpgbARpA5saPR5OAYwhEqwSlSHaOH3vBdywkQAIk4CQCyAGCEEWxsTGSmZkpLnVD45QCY8DeT1/3PsyHZwOMES0tTboJH334a/0Q3hgPtmzZKU8+9bKMjAzLvn1vqjwOh1Xej1UqfNR3tKFiiXpgDY+LzVuelQu1p7UXyGefvS2FhRXqIXeNdPd0yZf735Evv/zAi6i7+65cv3ZZ+pQXzYGv35dz5456z1e9cr08++yrUlhQouueqiQlpcnmzU9Kb0+bvPvOP2kjSH39f5LPP39fiktWaOMBSktrk9TfOqfPibA/Tz75I3lu96s6ZwjOEReXqI0hMIq8//6vNAPkAlmxvEYf39bWKEeOfCEXLhyZ0PapdJvJdzAwrah8QBtgkNME42rT5qdk+Yr1+gH1dB4e030/E12CKZuZlS0v/+gn8tJ3h5S3xwopXLqUeSeCCdxP3SrymjI4JklGapqWaL7TLj3Kq42FBEiABEiABEhg7gSGRsa08eO1j85JR8+gJMR6Il7kZbjl1efW0AAyd8SsgQQcR4AGEMd1GRUmgdkR6BsYlkNnG+Ws2giYkhCnkr9mJcmj64vEHe+ch4ezI8CjSIAEwolAb1+fdHV53pjOzc3S4aCcUGBP6O3t1iGW8DAfHhim4KE7CpIi4wclJjZOh4bqVLkv6q6flxPHv9ZtfeKJ72mvD6uXBkJGPfPMj5U3xk25UXdGe1EUFy/ThpPbt+smnQ/1myTM1vOlKo8SeI0EUmC8yM7K1waajIwC+frARzo3yaWLx/WPtcDwsXbdNh22C54r1oTpqAd/P/X0T/QhMMp8+Kd/lcNZH+m/+3p7tYfM+vXb5amnfuhtOwwPCNOEhOjw2IC3jK98HS5XjEq0Hq+NLympKon3uHeN0Q8cKyvXqxwma3RCeSQ/f+CBzSo/SM60RiDUMY2dyJxm0X+nq3Bx23fupJfHIvcEzIoJiYnaeIuCMdfZ1bHIWvH0JEACJEACJBA+BOD5AeNHVy9+PO2KUzlAhoZHwqeRbAkJkEDABGgACRgVBUnA2QR6+oflSG2TfHq8boIb6NaVhVJTlUsDiLO7l9qTQMQRgBFhaGhYv6mPZNgI3eOEggf2+XlF8pOf/C/aWBFIycjI1cfgIf2fv/J/qAf5CTrZOLwvrAXfwyjyd3/3n5TXR7sySOR6jAPqYT68LTZuejyQ0ymvjXR9vum8P0xlkEPi8x07XtReHEgiXnf9rA6tNaTCe8HYkKJCWRUXV+oE7cXKoIPQXPaCUFRI3v7SD/9e64o6GhpuarGk5GQpK12lkr5XTzoeBo9HH3tRKpW3iz/d4Ymy+/lX5OFt39JcEHLL6rVxT/0Bvf7i1f9TeZu8qs9ZrnRJTPR4sNh1tf/tFA8Qf8nW7e3h38EnkOhOEhikUHp7eqRHeWohZAfDjwWfPc9AAiRAAiQQ3gRc0UukoihNfvpUtTJ43E+CnpkcL0XZyeHdeLaOBEjAJwEaQHxi4YckEJ4EEuJd6sHZ/Wlv/X94tpitIgESCFcCMH6MjODheozEuOL0G+2LXWJjAttW4eF/5Yp1M1IXD9iXFrnVT6n3OF8P3REKa/XqjZNkyssqVXL0yoDP6avu6Q6GAQZeKAid9cC6h6Wvv097n8D7Isbl0sYElyt6ynBSMILk5hRIpjJSwJgzPDKi60hQSe4REsxXXhIcY22fL921h4eFuV0GRhxf/WKX88cAww96Dg8PKBFP3hN/svycBEDApcKPZmSk6VBYnR1t0t7eKQMD/WqecPxwhJAACZAACZDAXAjgGltekCrFuZNfZIl1Mfz3XNjyWBJwKoHA7tSd2jrqTQIk4CWQ6o6TRx8olJLcpAlU8rPcksQk6BwpJEACDiWAEFGhUtxJyZKYEK88H27rpN14MO+vBPpg3Xp8oMf4kvP1mT/d5vo5jAkJCW79Y9c/ED0gA3YIV2U/3p9ugdbr73jzeSD1+KsDXklj91olJ6cqJAxy/vTk5xMJdNc36A/cbveC91uimiMI3QYDSE93hw7zRgMIRygJkAAJkAAJzI0APCoRAqu1Ey+m3C/wDMlIUS/VjOcEmdtZeDQJkICTCNAA4qTeoq4kMAcCceoiv7osU6qKPeEWTFXYBDAJ+hzA8lASIIFFIzAy6onhG6M8L0IhbExuXr7yRMiXfZ9fkPqGOhWyKXCPi0WDGKQTz8WQYFSajzqC1LwJ1cL7o7+/Vy5fOqE/X7V6VUiMx4Vou9PPcfrUN3Lp2lnZXLNR1tesX/B+i4mJkURleEHp7hmQkeFhpyOl/iRAAiRAAi
Sw6ASQBP34xRb5930XZGDofs6Pgoxk+cHjK2RVqScH16IrSgVIgAQWjAANIAuGmicigcUlMDo6Jk3tfdKuEoENqw0BSoxy/0xOiJUC5QUCN1EWEiABEnACAeUgEJIF6+iGmho5e/66HD+2V71RnqxDOU3lCRKSDaFSMyIA48fJU1/L9bpz8uTjNZKXlzej4ym88ATG+qLl7OWv5Ow3X2rjx8svv+zNx7GQ2iA/jrWMjI4u5Ol5LhIgARIgARIIWwJtKvN57c02aesekJhxr2wkRe/tLw3bNrNhJEAC/gnQAOKfDb8hgbAi0NkzJH/65rocvtAo/UOeG+wE5RWypjxHfvJElWSmOiOBcFh1ChtDAiQwKwKLn+3Dv9rly1bIC7ufkDf+/V358INf68TcSLqdmJjq/yB+40gCw8OD0tfbJdeun5Pbt6/K1i0V8tDWLSqZdY9Eq7wnfLEgdLvVavx45ZVXJD+/YMG9P+x0hgYnhumwf8+/SYAESIAESIAEAieQkeKW9ctzpF0ZQOLH86BmJCWIW70AykICJBB5BGgAibw+Z4sjlMDI2Ji0dQ7KxVvtMjTs8QCJjYmS9OR4wXcsJEACJOAUAqHqAQJ+eOj94IMbVbLuWNm37zO5du0LqbtON3unjK2Z6omcH/EJifLsMw/Ktke2SWd7u1y9ek2NgQ3iYjLrmeIMurwvz49QMH4EveE8AQmQAAmQAAlEEAEkOl9bkSG5GWtkWEXCMCVBGUJyMxIjiASbSgIkYAjQAMKxQAIRQiA+JlrKCpNly8r8CS1eW5Et+I6FBEiABJxCIJQ9QMAQ+UjWrVsnlZUrlAHkmty8cVOGGdvfKcNrRnqmZ2yUyhXLJTM7Rxu/kMMhLzebYc9mRHHhhO2eH4WFhQt3cp6JBEiABEiABEhgQQhgL56SFCfJiZO9PZao71hIgAQijwANIJHX52xxhBJwJ8TIYw8slQcrs5zHxwAAIABJREFUJ8Ymx+f4YSEBEiABpxAIZQ8QwxA3XonKA2DlylX6x5Qxp2T2dspgWCQ9o5D5fLygr03JzskV/Fg/WyQVeVoLAX+eH4REAiRAAiRAAiQQfgRGlNfHtdudcvJKkzf6BVqZrJ57bKzOUzlQk8Kv0WwRCZDAlARoAJkSD78kgfAhEK3eTM1MiZdUd9ykRuE7FhIgARJwCgEnvbdlfxAeJU7S3ikjInT0tPd36GgW2ZrQ8yOy+5+tJwESIAESiCwCI6P35Gp9h/x+3yVB4vPB4VGJU1EvSvNSJD/LTQNIZA0HtpYENAEaQDgQSCBCCPQNDMuhs41ytq5tQouXF6bK1jUFkqpcRFlIgARIwAkEnOAB4gSO1JEEwp0APT/CvYfZPhIgARIgARKYmoAxfkwtxW9JgATCnQANIOHew2wfCYwT6OkfliO1TfLp8Tr9BgQK3oLYurJQaqpyJZWkSIAESMAhBOhD4ZCOopoksMgErJ4fL7/8spSUlCyyRjw9CZAACZAACZBAsAm4opdIRVGafH/HikkhsIqyk4N9etZPAiQQggRoAAnBTqFKJBAMAq6oKEmId0lcrOfHnCMxnvk/gsGbdZIACQSPAD1AgseWNZNAOBCwe3688sorkp9fEA5NYxtIgARIgARIgASmIeBSIb7LC1KlODdFRsfGvNLR6plIrIvhv6fBx69JICwJ0AASlt3KRpHAZAJIdP7oA4VSkutJ+DU4ouJguqJ1DMwkJkGfDIyfkAAJhCwBeoCEbNdQMRIICQJ2z4/CwsKQ0ItKkAAJkAAJkAAJBJ/A2Ng96VURMFq7BgQJ0VFgFMFPZmq8uPkSaPA7gWcggRAjQANIiHUI1SGBYBGIi42W1WWZUlWcMeEUcA9lEvRgUWe9JEACwSBAD5BgUGWdJOB8AvT8cH4fsgUkQAIkQAIkMFcCQyNjcvpqm7x34LK0dw94qyvISJYfPL5CVpVmzvUUPJ4ESMBhBGgAcViHUV0SmC2BUfXmw+2WXmmzbABQV0pirHINTdZvQ7CQAAmQgBMI0APECb1EHUlg4QnYPT8Q9ioqiivGwvcEz0gCJEACJEACi0ugratXTlxuku6+Ya8i/UOjyjNkaHEV49lJgAQWhQANIIuCnSclgYUn0NkzJHsO18nhC42CCz9KgvIKWVOeIz95okq7grKQAAmQgBMI0APECb1EHUlg4Qj48vxg2KuF488zkQAJkAAJkECoEMB9AqJcZKS4Zf3yHOkfHPGqVpSVIu6E2FBRlXqQAAksIAEaQBYQNk9FAotJYEQl/2rrHJSLt9plaNgTBzM2JkrSk+MF37GQAAmQgFMI8H1up/QU9SSBhSHgy/NjYc7Ms5AACZAACZAACYQSAdwnRC1ZImsrMiQ3Y40Mj+cAgY4JsS71WWIoqUtdSIAEFogADSALBJqnIYHFJhAfEy1VJWnS2p0/QZW1FdmC71hIgARIwCkE6AHilJ6iniQQXAL0/AguX9ZOAiRAAiRAAk4kgPCX7oQYKU9InaQ+jCMsJEACkUeABpDI63O2OEIJYAOwbW2RrFEGD2vB5/hhIQESIAGnEOBti1N6inqSQHAJ0PMjuHxZOwmQAAmQAAk4kcCI8vq4drtThf++4w2BlRDnkmT13GNjdZ4UZCU5sVnUmQRIYA4EaACZAzweSgJOIoA3HWDoiI+b6O0RHRWlXURZSIAESMApBOgB4pSeop4kEBwCds+Pl19+WUpKSoJzMtZKAiRAAiRAAiTgKAIjo/fkan2HvPvVFenoGfTqXpqXIvlZbhpAHNWbVJYE5ocADSDzw5G1kEDIE+gfGpFDZxvlbF3bBF2XF6bK1jUFkpoUF/JtoIIkQAIkAAI02XIckEBkE7B6frzyyiuS//+z9yXwUZVX+4fs+76vkI1AwhYI+44sgoKIiru1aq3a2n6t3b/Wz/771a97a2ttrda61KWKCIKsKrITEAhrICRAFsi+7xv/87zDncxMZpIJhGQmOef3m18y9773fc/73Jk7957nPecJjxjagMjsBQFBQBAQBAQBQUAQEAQEAUHAIgJCgFiERnYIAoMLgbrGVjqYVULbD1+g5tZ2NTlX1v6YPjqS0pJDqWt1zME1f5mNICAIDB4EJANk8JxLmYkg0BsEzGV+REZG9qYLaSsICAKCgCAgCAgCgxwBJ8dhFB/lR3fOS6KW1g79bFECKyrYe5DPXqYnCAgC5hAQAsQcKrJNEBiECDhxqSt3NydyddG9tCl6uIn+xyA83TIlQWBQIyAZIIP69MrkBgkCLc1NfT6T3mR+oP53Q32dVT64uroSXmKCgCAgCAgCgoAgYP8IODk6UFyEL8WE+nSZjIuTQ5dtskEQEAQGPwJCgAz+cywzFAQUAtD+mD0+kmJDvai57WoGiJOjqoHpJSLo8ikRBAQBO0JAMkDs6GSJq0MWgfKKSsrat4vGjJlCbj7XRy70NvOjpqaGMg5k0JmzZ6m5ByKmra2VoqOjafbsOaqUloODUKxD9kMrExcEBAFBQBAYFAh0dFyheq6AUVzZQK28IMKZCREYiJHQAA/ylEWgg+I8yyQEgd4gI
ARIb9CStoKAHSOATI+xcUGUOiLQaBYQQB8mD/t2fGbFdUFg6CEg4cmhd85lxvaHQH1LI5U115JDOzJBro8A6U3mB8iPD9d8SNvW/YeCPFrJ182Vml2CybWltMvfE+cq6dOjJ2lEXCJ5uHvSsltvJQdRGbK/D5t4LAgIAoKAICAIGCDQ0tZBx3Iq6OO92VRZq8tIdeNKGAFe7nTXTUmUMtw4JiLgCQKCwOBHQAiQwX+OZYaCgEKguaWdSngFRFGFcTmIIF8PigjyIjcXR0FKEBAEBAG7QEAyQOziNImTQxyBhNg4Gj1zMbn4X7vKmLnMj+6yNAzJj6WTo+mmqSnk5Rds9kzszcqn6qZtNLIkjJy9Xcy2kY2CgCAgCAgCgoAgYJ8IVNTU05HsEqptaNVPIIF1QeobW+xzQuK1ICAIXBcCQoBcF3xysCBgPwggBXRLxgXKOH2ZGpkMgbkz6TEmLoTuW5gsBIj9nErxVBAY8ghIBsiQ/wgIAEMEAdPMj+4Ez03Jj9uWzCVPd/OZJ4dP5dCGDduoqraRlkxPoJyKToHUIQKtTFMQEAQEAUFAEBjUCAT4eNKExBBqbG7TzzMqyIfvDWTRw6A+8TI5QcACAkKAWABGNgsCgw2Bto4OqqhupjP5lUZTiwjwJuwTEwQEAUHAXhCQDBB7OVPipyBw7Qgcy9xHJ1hDZEpaOj3wwANKn8OSWUt+1DfyfdD5Anp9zVZFfiyalkKNDY1MgJRZ6lq2CwKCgCAgCAgCgoCdIQCh87HxAaz3MUZpgGjmzmWwoAEiJggIAkMPASFAht45lxkPUQTcnB0pPTmE3N2Mv/aJkb6EfWKCgCAgCNgLApIBYi9nSvwUBK4NAVPyIzY21mJH1pIf6MCU/EiOCaIjXApLTBAQBAQBQUAQEAQGDwIOrHHq7eFCrkx4mBrIETFBQBAYegh0vRoMPQxkxoLAkEAANwBTUsNpHKeBGpqbqyOXwpJLwZD4EMgkBYFBgoBkgAySEynTEATMIGBKfkRHx5hppdsE8uO9996mHRvXEzQ/LJW9Mpf5AfJDTBAQBAQBQUAQEAQGHwJtnPWRe6may38XGZXACvFzp/RRYUoDVUwQEASGFgIS9Rxa51tmO8QRcHRwICdH43JX2CYmCAgCgoA9ISAZIPZ0tsRXQcB6BEzJD2syP4T8sB5faSkICAKCgCAgCAwFBNrar1BOQRWt232Oquqa9VMeHuZD4UGeQoAMhQ+BzFEQMEFACBD5SAgCQwSBxpY2OnDiMp24UKGfsTtnf0Tx6ofpYyLI18u8UOgQgUemKQgIAnaEgGSA2NHJElcFAWsQqK+lo1kHKe/LQ0rz4+GHH7ao+YHvf+01ZH7ADWh+SOaHNSdE2ggCgoAgIAgIAvaLQDtrnLpymW+UwHJ1aVcTcXdxJDd+7+IsYVD7PbPiuSBw7QjIN//asZMjBQG7QqCusZUOZpXQ9sMXqLlVdxOAm4LpoyMpLTmUfO1qNuKsICAIDGUEJANkKJ99mbu9IdBU00xuPrpFFob/Yx4O7U3U2NpEudmlRPxavHiqEjyPjIy0OE2QHx+u+VDKXllESHYIAoKAICAICAJDGwGU+E4eHkCP3zqeWltb9WB4eTpTdLD30AZHZi8IDFEEhAAZoidepj30EHDiUlcBvq4UGuBpNPlAroOJfWKCgCAgCNgLApIBYi9nSvwcqgg4OTlRS3MT7d7xCRXm55K7s5tZKEB+ZO3fScVFuXT33Su6zfxAB9eq+bFiXhrFhlgOeBQVlFBBYaFZH2WjICAICAKCgCAgCNgXAhBBD+O4R4i/h5HjDsNkGZV9nUnxVhDoOwSEAOk7LKUnQcCmEfB0d6YFE2OMSj+4uhD5ebkR9okJAoKAIGAvCMiji72cKfFzqCLg7e1NM2fNokNfnqSO8nyqtwBEfaMDl7oKp0cfm0CrV9+tyl4haGHOrpX8QNmr7sgPbaxmJmzEBAFBQBAQBAQBQcD+EejouEK1DS1UUFZnNBlnRwcKZVJEyn/b/zmWGQgCvUVACJDeIibtBQE7RcCNa16OiPAlCH+ZmqVgg2k7eS8ICAKCgC0gIBkgtnAWxAdBwDICXl7edOddd9Ptt3eWnbDcWrfH1dWyFtm1kh89ZX5oPoVFhVB8XHxPLsp+QUAQEAQEAUFAELADBFraOuhYTgV9vDebKmt1Cxyg/xHg5U533ZQkBIgdnENxURDoawSEAOlrRKU/QcBGEWhqaadLvAKirLrByENkgMSE+rAgmKONei5uCQKCgCBgjID59eGCkiAgCNgKAlhY4UDDyMnRMqlhra/XQn6gb2vJD2v9kHaCgCAgCAgCgoAgYD8IVNTU06mLFVRW1aicdnF2UHGP+sYW+5mEeCoICAJ9hoAQIH0GpXQkCNg2As0tbfTpl3mUcfoyNTIZAnNn0mNMXAjdtzBZCBDbPn3inSAgCBggIBkg8nEQBIYGAtdCflTVNgr5MTQ+HjJLQUAQEAQEAUHALAJOjsMoNtyXFk0aTuVMgHi46Up+QxM1yNdYF8RsB7JREBAEBh0CQoAMulMqExIEzCPQ1NpOFdXNdCa/0qhBRIA3tXV0mD9ItgoCgoAgYIMI2HIGCGoOd1y5QhBZtFReEG1aW2X1mQ1+tHp0ydHJmbMaHHpsJw2uH4HekB97s/Jpw4ZtalDJ/Lh+7KUHQUAQEAQEAUHAnhFw5Hu1xCg/Cg/0pPZ2XawD22C+ntefnWrP2IjvgsBQRUAIECvOfHl5OZ08eUrfMiDAnxITE6m7WsVWdCtNBIF+RcDN2ZHSk0PI3c34ax8b6kXYJyYICAKCgL0gYMsZIJWVFVReVkphLObs49OpuQTSo66uliorK6m4uJjKSsuooUGXkm8vuA91P52dnSg8IpxCQ0PJ39+foHNhieQa6lhd7/x7Q36cOV+gyA/J/Lhe1OV4QUAQEAQEAUFgcCCAxVKuHONwdHCgdoPFnnjv4iQLWcyd5cLCQsrJyTW3y2gb4qEebm4UFhFBHh72k03T3NxMp06dptraWjUfNzdXGjdunD6ua7rf29ubRo8edcPivtqzoenzRH/70eMJH0QNhACx4mQe2H+AVq26k9raWqitvY2WLFpIb739zg37IljhkjQRBHqNgLeHC01JDaeJyaH8OdaFD5Ea6sQ3AO4sCCYmCAgCgoC9IGDLGSBNTU1UVV1Dvr5+egIEwVw8UOzdu5dOnTxO9Q3evC+A7yNc7AVy8ZMRaG5uoZqaPeTpUUujU8ZQevokSk5ONiK6BKjrRwALj9599z3au20DLZ0cTbctmUue7l1Xa9Y3NpNkflw/3tKDICAICAKCgCAw2BBo46yP3EvVXP67iBqb2/TTC/Fzp/RRYRQR5DXYpnzd8/noo3X0zHe/p+KelszJyYWCgkIoMCiI74Hj6Q6Oky5ddrNdECFVvAjt4Ycfo8zMQ2p6iQmjaNv2TRQbG6vel5WV0Teefor27t6r3o8bN4nWrftAv19t7CMrLiqif7/9Nj8LutGjjz5iFFvuTz/6aDp2041EPXtxqkB+iAkC9owAVjy0UQeX
77Dl8KE9Iyy+CwKCQH8gYMsZIEH8QIAVQ1qWKG5wP/v8M9q9axc5OsXS6NSVnEEQS56e3v0BlYzRxwjU19dyBs9FunD+BB08+AHNnTOa5s2fR9HRMZIN0gdYgyz88MP3eyQ/MJRGfkjmRx8AL10IAoKAICAICAKDCAEs+MwpqKJ1u89RVV2zfmbDw3woPMhTCBAL51pb9G1ht1oQXlB4Qb1AJGzf/jl9+9vfpO985zs2TYLYyrMjsjs+3f4p/fmFP9KOL/bQk9/4tiWoZfsNQEAIkBsAqnQpCNgiAo0sgn7gxGXKuVzNqyCuiqC7OlIUr36YPSGKPK8Kg9mi7+KTICAICAKGCNgyhQviQyM/Ll68SGvWrKGsM5W8imglJSVNIF/O/HByMi476ODQQR0dDhxAl7+2jIP2GYyJTlDn8uzZI7R3/y5+CCygu+68gxISRwoJch2XKsn8uA7w5FBBQBAQBAQBQUAQMEIAJbBcudKFn0Gyhxu/d+GSpmJ9g0B5eQn95te/pajIKHrwoYds9j7YmmfHtrYbv+B9185d9PjjTykCCebpLlq8ffNJtK4X+eb3gJOtMIU9uCm7BYEeEahrbKWDWSW08UCOvi1uCqaPjqQ0LoslBEiPEEoDQUAQsBEE7OG3GXV0NfJj5ozlHDAfSy4ursT66OplaO3tulrE8te2cdDOGQiswIBgmpg2R2Xz7PxiLf3n/Q/o3rvvpuFx8Tb78GcjX1+zboD8sCbzw7DslWR+mIVSNgoCgoAgIAgIAkMeAeh8JA8PoMdvHU+tra16PLw8nSk6WLKwrfmATJ85nVatXKXKNGmGTOjDh4/Qpk82U01ttdqMv++9+zYtZKmAyMhIa7q2yTbQ9/uvp7/D9/P3cdnbJgoPD1Oaf31ppWUlVFRUoLp0cjQfjjf0A+0CAvz63I++nJM99WUecXuawQ321RqmEOI1hqYJYiKN/+jRTMrNyVEf2DGpKV0ejFEa4/jxE4Qvgq+PH6VwG2vKKOC4M2ezqaS4RImqurq5UDDX4ktMSqTIqGj+MnUv7ITUq+zsbMo6fYZa25r5QhVN48eP09extjQnU7gN+4EfISEhlDwyiSKjo0UjxRSsAX7vxOWvAnxdKSa0U5QXLgVyHUzsExMEBAFBwF4QsOa3eSDn0tDQQNu2blOZHyA/kpPTyNHRsQvxMZA+ytjXjoBGYDk7828qZ4PMnrNSkSAfrV9PD/Hqt8DAwGvvfAgeiftlTfNj1ewEWjx3mmh+DMHPgUxZEBAEBAFBQBDoKwQQkwsL8KQQf2ORbodhw2gY7xPrGYGExCQub/Vf+oU9iHoCOdy3vfDCCyrzQyNBDn6ZSadZYFwjQCzFE3PPnaPDRzLJmbNwJrGWXnh4RJeFQ1qMtKCggCAQHhMbTfHx8VbfX2MRGnwxjLFqOh/dzdrHx4fuuHNVd02M9uF5Lzc3l3LO5VAl64vA1/iEePY1rlt9QOiomMoraHjhcwth+d74gWPz8/Mom+PDmDMM8d2UlNE9YmbuPGn9nTxxkkpKSigqKopGsRi8PZNb2okTAqSHj3dPq0xBALz44ou0bctm1VMcfzF/9rNnFanxu9/+mnAhQFqYm6sHjUweTQ8+eA+L3DzK5S+c6D/v/Yf+8c9/0InMk+rC4ePtSyPiEumxxx6mBx643+yXBivkXn31FVr38Xo6c/ocVVdV6L88gYEh/OGMoQU3LaKnv/k1i2I9Z89k0Z/+9Cfatu0LOn8+Wx2PY6fNnkrfeuKbFBMTRf/7q19RCV84YI88+jjdvup2owsTvhSHDh2kF//2Eu3bdUDfD+YQGhpBCxfOoUcfe5xLfozrckHrAXLZfYMQ8HR3pgUTYyg5JshohNAANw40ON+gUaVbQUAQEAT6HoGefpv7fkTre8Tv44kTJ+j4yWxKSpykMj9AfnRnVzii3tHRmQLtwKT0MH5Au5FmOOaNGM90TpjLjRjnRmJkTd/IBgEJMnbsDMo4uJUO7D/A92ELZBGINeBxG8OyV70hP+ZMGkWxIbKC00qYpZkgIAgIAoKAIDAkEQDhIXbtCLS2tujvaTUkQRQ88MADtGnLJr1geH1dnT74jtE++/RT+tNLf6a2+kby9Q9QGiE11TX07M+fpYx9GRwPdWEdvRn0y//7NU2YMF45qN0Tvv3uv/UxUmRJhIVFUWpKIq2++14Vl8T45gzEzJtvvkWGxxvGYRctWmTuMP02jP+LX/yCsk6dVNu02G5oWJjRcYgBo5TVKxyXPfzlMSYfLlJTc4OK+UIgHr4ihmooDn/kyFH61a/+j7KyclRbGOKwr732Lh3M+JKcPN3pe99+hmbPnqVwsMYP9IFnztf+9Q7rimylgoI8FXuGIb47clQCrbh1Od133/1myQvNp+rKCv34IE0Qb3777Q/ofG62UZxai2Vbwl8NbOMmBEgPJ8iay2XOuWzazCs9YYkJBcz6vU1/e+kVyj53Wt87PuQQCXruf7IJTCHMkDHFe5AgaPPjH2Wr/U888YQReYAv9M9++t/08suvdGEM0R4fdrzQR2FBDv3pj38k0y8rPuTfePop/YVKDXT12A1rmVQ5ns21++6lz7bu0Nelm7dggdZM/UVwZ/OmTcwGP2M0R+zDHPDC3PfsPUS//c3zdNPCm4yOlzcDg4CbiyONiPAlCH8ZGlZAWPM5HxivZVRBQBAQBLoiYMvXLGRD7t69mxyGBdKkSXN5hZNr1wlc3QKSAGKDlVXl6ne7uaVR7fH28qfwiOFKL6Qn8sRi5xZ2tLY2U21dDV2+dIH/VurHCw6JJH+/wG79tdCl0eb29nYuNdBEZeWlVFqSr5+Tq4s7+fmHUDg/xDg7u/X5vKzx7Ua0QTYIztGoUWlUWlpIn36+h8ZxRu1gWCV1I/Ay7LO3mR9vvbOe/LzdSciPG31mpH9BQBAQBAQBQcC+EWhr76C84lo6nlNKzW06/VPMKIjvI1LigyjUJDPEvmfb/957eXmRj4en0cDNTS3698je2P7JdhXsRzA+LW2CCqojVglD8D8nt0BlgsCQ9fHznz9H//znG3qCQGunia5DNBwLzH78o+90yWzAPeVzP/8V/fUvfzQ6XovD/uTHp+jz3V9QeVmZ3kfTf+qYxMk4lKGPlY4bV8l9NRs1w+fqjTdeox/96Fk92aA1wFiar3uZ5Hn2f35GTz31lCKQMK5h2TDtGK09yJOH7nlQbW5qaurRD8RkQTI9870f6TE1dBRx4b27SxTZtGv/Xnr+5/9LqampRnMx9AlE05IFi1UpM9N4M+K7J08cVbHstrZW+ta3vm23C72EADH6CHR909tVpmD/nvufn5Mz1/lewjXwwHaC5cMHBl9yfHj+9xfPq4FcXJwttgFrecsty4yyODZu3EBvvfWOnvxITBhFI8ckkicHFeo5aHI045ietMCXa8WKz+iee+7VTwpMIljHvbv36rdFRQ6n8ZPHqj40P+EfAjIwc3XpTjEj+uOfPKsnP5D1MX3aZDVXsIf4smtkDhheZJQkjUzWjyn/DAwCuFhfKqunooo6IweCfD0ogoXQQZC
ICQKCgCBgDwj09re5P+eUk5NLhZfKKS5uDnl7W64bC6IgL5/TwL/8lI4dz+Ab/zwmDmqVq87OSPdOokkT53J6+AImJoKuOyME45VXFNOBA1s5g3NHl/FCw2Jo7JjJNGXqUiYpontNUKD/6poKJQx+MGM75eSc4hv4BqM5ubl5cFr4aL4Bn8YkwcwbQvD057k2HMvNzZOGjxjFxM9xlXZvLqV/oHyzxXGvJfNDyA9bPJPikyAgCAgCgoAgYHsItLVfoTMXK+itbaepqq5ZiaHDwgM86DHPMUKAXOcpq+aST5eLdQuptK5Qlt/wf8Q7m5g/QHbIn1/4m177QmszbdYUfl6KY70NVNX5k1Hgfdy4SVxCOF7FObFIGwusQTCA4IgbEUmPf/3r+rL/IAM+XPMhvfLyi3ryA4RCWvp4iuZSUFqsFAu+uzNU6enJtm7eTD9/7ldGmRbpE8d1iYUiHvqH3/+Z5QGSORNkqeoWMWJLBqx6Y6jGY0h+IG6bkjpeYQZDZgowQwxam/df/vCCUXzZhYkZQ5/+8Y/XVNbHiBGJlDZxbJd+MKdX/vE6LV26rAuZ0hvfB7Jtz2d4IL2zgbF7u8oUX0pPrxDOfPgV3bX6LlW/LSMjg+6/7yt6wgBtQBr88vlf6ktdITPj4Ycf07N3KIuFGm5arTpcFE6fOs7pXrpgCspM/fWvL9B8zs5AnTjsf+WVV5kN/bE+CwPtcTHQNEk+/2yHYh01w0UFGRqzOM0KrOTFixfp10yQmDJ+hqcB4yDFSmNuMQ8wmyjrhVQoPNC+9NJLiuTBPME4Qhj0xz/+iZTCGuDPc3VdC23JuEBfZBboPXFn0mNMXAjdtzBZCJABPj8yvCAgCFiPQG9/m63v+fpb5vFvaW2tA5ekTGLSoqvgOUbQyI8P17zEKdTruJZupfp9d3d3Vw4UF2fTieMH6HjmTqqouESLFj9wXSQIMk0uF+XTxo2v0o7P1nD/l9QNb4C/TquiojJPjZd55AsO4F+mZcseociIWKtJF20+W7e+RRkHNqubZ5jhnBoby9Q8j2XuZn20z+ls9hJatOh+VT6qr7Nc1OD9bDjXQUER/IAdyRptZ/X3Vv3shl0M11vyQzI/7OK0ipOCgCAgCAgCgoDNIdDY0k7NrbqXq7Ms+OztCXJ0Mg7MI754+fIlLoX/Gp3JOqXvDqWf4uMTzHavZUYgSI9F3B6eusyRW2++RR8vRSkoTRcDC8l/87vf0+jRKdS6DdakAAAgAElEQVTBzzB79+yhp1mcHDFI9IVA/c2LF1Fcgm48+IPMBQToYSA/vv7E1+iHP/iBqoiDGObmTZvpe5wtYVilx6yz3WxElspLr/5dv+gcmS3PP/8crV59r4qFatooWiwU2R1vvPkGzZw1kyZwBsy77/yby4Z9Ti/88df6ua5evZruufse8mRMxowxztCw5ArGgRSBFpPFfL/61Qfp+z/4oYof4xxlZmbSz/7fs3ryY/P6T+jNtHTG4HtmszeAPfq7ZeVylS0C7GHIMnnyyaf1uOEZEponptkklny1te1CgPRwRq5llemy5Uv15Ae6hw4G2E3ty4Yv/uyb5hjpfIxmUZkZ0ycZpS9pAjbow9nZRTF899//EJ07l8vZI1wzb/58PakAAiOdRYQQbNC++OXlVbzyUlezD2W3Pt60weiiAK0Rw/JU+LJ861vfoqPHj3UpkaXBVJifr+rLaYbMDwh+anXgIPz5GJMhWj1AfJHwP7aZluPqAXrZ3ccItHF9+YrqZk4FrdH3jJuAiABvwj4xQUAQEATsBYFr+W3uj7nhhhMieLCgwGCLoufIlIBoNsiPRs6SuGnR3TRt6mIKCYmilpYmFtQ7zjpdHyhSYtu2dzizIJUmps3h3/7OlUOaELe5eZmWO0YmxoH9n9CmjW+p8WbNuY3vOZZwmaZ4NV5BYQ7/tn+gyAkQJBER8RQaco9V5bBAfoBc2fDxq7Rhw+vU2tLMWZ9pNG3aItY+S1PlvGAot3Um6zDt27eVzp45rFLBW/iBZOXtT5olW0znYDhPS3PXjtH2m+vD9FjDNqb7DMfsqR2ORckyF34IKS46Y+60yDZGoDfkx8EvD9Nb6/dJ2Sv55AgCgoAgIAgIAoJArxBwchxGKXFB9J070hQBopmXhxvFhvn2qq+h2jjz8An6wQ9+ynqxnbGi2jpdeabDB48alZqaMXOKEiq3ZIiBfu1rj6ogfXBwMFecaVOBeFQp2bp1qz47BAusoZ+hBdhZFVHpYiB2+cx3ObucCRAQLwcOZugJEGReQ3tZM2R+PPPMM/r4I8a5dflyLrmVQz/58U+N/Lbkr7nt0Hnet3O/fhfivhr5gY2aNsqBwweV9klIZCRNnpiu2iNOithrVVU1/fWqCDowSUocQStuW2FuOIvbjh7NVJIFmkFL5cc/+W99+V0sgIeuys9/+hzl515SMWbEZT/4YB3de/fdetxMB0B1IOiQGJIbiDlD31mLZbe0tPJzo04r2vR4e3gvBEgPZ6m3q0zBvs2cNkUxmZqBNQ0KiDAaadbU6eTl1SneiDaBgX76NvhgGdbQw4cYXyi8tHJSuFiAhbyYl0enTp6iNR+vpbIynegNOqrj2nEITMBqmSU8l31W3390dCzNmTPbyCe8iUtIosmTJlskQLLOnFXiOprNmDW7S/29wOAQoz7yzl9SPgoB0gXuft3gxmRHenIIubs5cQCsTY2N/2NDufyVrIbo13MhgwkCgsD1IdDb3+brG836o7HooLWtiVceeXJWg+VbLOhvHDmyW5ERc+aupDvueILi41JZEFC3Mi0hIVUF0osun1e/uSAOUkbrbqBBZsBQTgoCgqZi6dD4UKWn+MHCw92DHy48lB4HymwhA2PylMV0151PKXF2jVCBfoWXpy9nrhSr7I2zZ47SjBnLOEMkuMfJNzTU0O5dHzKB8r4iP0Cu3LbiEcIcUAJMIw5ADmEOIEU+WvsqZ4ps4RVdG2gkkyUgi9zdPfWEETJWMAcs6GhpYUx5TtBSweIPdzd3vsfy6ZI1os0bDmN/R0eb0jppaKg3Ot5Q58TwGCw0AVameKI/TdekobGBnDk93tz4aIfzBxwvFzmo1WZ44BLrRKA35MferHwhP+TDIwgIAoKAICAICALXhICTI2djB3txqW9jnQp0JsLo1kGKwLmWZdDdEcjqePSRR7vEBQ2PQVmlRx973KgEE/Yjm0ERBhygh42I4/JLE8Z1GQ6LvZFlgqwKkCCHvjxJd97VocpgIfO6uqpCHQNS4ebFN3PsMdyoD8RT582bz338WZ/B0WWQHjYc44Xi2jha3NdUEDwyKppe+evfycnZmeO9XupZoKeFe719ZkD5q6IiXVUX+LFw8RJVetfUsMh+3ryZ+nOIZ7zDRzItEiAxIyJoZFKiUTfANzwiUr8N2Fey7IFhpSHTcW35veWnc1v2uh996+nDauoKarf5+xkHDEwvsAhYhASHGpWEQhsvbz/1hdW+/KZ94z2+HKeY4fz8889Uaa38wnw6c/pcFwEe7VitrER1dRXV1+p0PbAvOMyfgoOCug
yBD3hCgvGH3rBR4aU8VcNPM7CIuGCZGur0aYYAT1mpZbEh02Pl/Y1BwNvDhaakhtPE5FCjAZycHMj9ak3MGzOy9CoICAKCQN8i0Nvf5r4dvfveWlq6z6hDcN+ZA/nQwggJjVKZHyOGJ6uAvpaBAOIgbkQKhYWPUNkSNZwx0tjUyERGCWdybGStjUrOLp1F48fNYCKk88EOgfrzF7Jo395N/HBQT5PTFylxblhERLQiJyDMjrFBKGjjoY/EpAm8IitOESA1tVWKOLBGe+TcuRO0f/9nagHGpPR5ivxITZ2szx7pzMYYpggRZLJo1tRUzyLpTUaAgpTAww20UQ4e/IKFxXP1++HfqFETaNr0m1XpLG3uhvN2dXXhurULuHRYMe3Z/QllZx/T65DEJ0xUeAMDLy8/JT6/a+dHquxXUqIua8UQT21gkDzQTTlxYh9n44y2mhwymtgQf9Mb8kMyP4b4h0WmLwgIAoKAICAIXCcCCNA2trRRNet/QA9EM2SGBPi4S/nv68RXOxxl9f/7Jz9U1Wm6M2gXx8fHdWmCzPkKg1ghnkOeeupJcvLUlQXWDkBGheFi7zIuEdzS3EQOvDCqmDPRtRiqBwuzYxzENU0tIMCfAjkGiueM3hrisNB41MZB3Dck2JhkQZ8Y13Tht7Zwry+eX+FHzjkWI79KGHkyyQKdEU32wHBeIF8MY7tYWJZ15rRF8gJ6Ke4GC/m1vvxZ63mwmBAgPZzJvlhlirp1hoYvi2H2Rw8u6Hcj2wPiQKiPB8bPkCgBcQJiBYycZl5ubqTV7KtvaKKG+nr9Ph++MICVNGcuLg5miRhdaY9qvUA6jtWxwuZ66dwGwqS6pqr7RrK3XxBwdHCgNuo+ONcvjsgggoAgIAhcBwJ98dt8HcNf16HIMADhcd/931P9IEsDv9+mBpIEBq0OiNQ5caYoSIvCwjxVOut87llVXiqZMyq0xQ4oRbVl879VNsZwJlBAgCATJTQknO695ztGWSGm4+G9Kz9EaIZsC3PZEIbHgcBAua4L50+y/pkXTZ06X2V+ILPEUjkpzAGZIAEBoTwfN87+CFF/0R5Extmzx2jdes4Q2b9FaaL4+oWpbBdkhFy8cJyyTmfQ6dNHOOX8G3qiBdkehQW5XN/3DVVXuLS0SC/CjmPxKi7KpS92rOWHhi/VFGbOXKqyS87weNg+ddrNitwYMXyk4RTZL+in5NGWLe9y1s4OuuWWhxQBImY9Ar0lP16TslfWgystBQFBQBAQBAQBQaALAojBnTxfRhv3XKCKuka1393ViQK9PeiWmSMoZbhOB6/LgbJBj4AWYzSEBPf7UVExFB7qT6gGc9tttym9CHMBeMPjYsJYJ89MZnQzL/AyXKiNIP3mrdt6PAslXIYJZbRwnlGWy9A8OKvcnLmZGd9cO0vbmhp0WSbafuh29Mb64vkVBEh5hbH4fFBw14Xteh9NCI0WXiBnyVw93Pl5sytFgPjwYLGusxssM+ujefQFS2eaAQLXnJ17Bz0+6L/+7Z9ZMOe3euID9fGQIjYuLVXVlvPkD/dPuc4bGE1crG6EIQCDQA3IF4yBVDawud2Zp4s716OL7q6J7OsHBLAC4sCJy5RzuZoam3Wl0dxdHSkqyItmT4giTzfzhFg/uCZDCAKCgCDQKwT64re5VwP2cWOQAIEBumzRzgyJzkEaG+vpzJkjnGZdpITKockBosTby4dmcOAeWQ0gAnbvWc9pycNVqSpkTpw6uV+VlQJxMGvWUkVGYCyYVu7K3HgtXLoq++wRKsg/qbTE4kYkE+4xejKUhLrE5bywImtk8mgmEFJVeShL5IfWHzJB0M6BSXlDkqWcszY+++x9RUjg4WrhwntoytRl5OsboDJSQEB8tPZF9dfH15/8/EPY107CorGxUQm8w8aPn8ckx60Uy2RTW1srHT2yk95773cqw2UfkyspqVNU6a24EUk8HtHZrAwmck51EWVva2vh7acV3u5MpIAkwXkQsw4BIT+sw0laCQKCgCAgCAgCgkDfIQDdj8tl9bT3VCHVNrTqO06I8qN5EztL+vTdiIOvpyWscQEdCcPYpQvHMbFgCuWdkBlgjvgw95zm4tq5OLs7pPD8AZ1h0wwQw2OQDQLyRQvWOzh2yg+gXWurrpxWd+P0xT4spOqNmcOlN8ejLeYMosLQmpqae9uN1e37wmerB+uHhjcmSt4PjvfXEH3B0plmgFyL7yh79fG6tXryA3X2fvzj79PNNy+h4BBdOa0TJ06ooAfMtIyWr7eXWhWp2eXiSqrjzAyI8ZhaZSVneVxNqTLch4tbgEH6E9o8+thD9P3v/8C0C3lvgwjUNbbSwawS2nggx8i7eeNiKI3LYgkBYoMnTVwSBAQBswj0xW+z2Y77caMhSVBbW6nKWzXU1yhR8jNnDtKOHRsIAf1Zs1dwpsM0vT4FsicWLryDPnj/L6yjsVlfuikv/xyTB2uporKcFi0CcbBUlXnSzHA8ZG5AF6ShoZrLWtboRdAhTD5uwhwuEbVAZWX0ZNXVFcpvkAQoT4WsDi0bpadjTduBwMnNPUFHj36uDp08ZQktWvyAInegIwLCyNdnBftbwZmwv6LjmTtp0sS5irAwNGiowZfFS+4z0lWZlL6Ajh/frwiQvItnWYSwlPsMZQ2SdF7MkULnOYslL+8MY1KjSnXBMC50RM5wGTKU8xw1Kl2VJtNIJaOB5U0XBHpDfuzef5je3iSC511AlA2CgCAgCAgCgoAg0GsEUPkinPU/po+OpMbWTgIEGSB+Xj3f4/Z6wEF4QIBfAI0ZO9ZsOanupmvuOc2bMTe3MNyTdZE9vTsz4bHA+28v/6OLVoil8VClJjTER1/BBs8BxSXFZpujMo1hVRyzjSxs1DSbNckCjFNS0qm/rB0Gf+rqapmEaVWxWUNtaHO4WBjO4mYQSYb60tBfLCnu6ofWQQXrdWgGvZDQsN4tTO8Lny1OZgB2CAHSA+h9wXiZ+6L3MGyX3VlZp/SrGrFz2S2L6cGHHjJiXCs4FQrBEphpBoivv79KU8u82jOCHBcv5nW5sDQ0NNDhw0e6jK9tiIriTAFme7VSWxcvXDAr9AmBdszbHCNssXPZcUMRcOKbgABfV4oJNV65GujHqW68T0wQEAQEAXtBoC9+m21lriixlJd3jjZz6ap8JjEgRI7faNSovXnZ/Sxed4cK8muZEiA1QG5opZt2fLGO67V6cknKXSpLAUH6+fNXUTjf4JorYaUr6VRAW7e+RadPHdGPp7JGmGzBsRBINyUoTPECOdDa2sRZKmUqMxQZGR4ePWeNmPaD9+gLJa4g9g7Rd2S9QOPE10dXc1YjbzB3EBbIDgGRcf7CSSYsFqsSV5rhHgU6IeFhMUa6KsigCQoKUyXFWltrFfEDCw6JpsTEsUprBaW1QOhoBAgeYsrKLtHJk4dUW2inIONGrGcEIGr5+uuv07svv0yjEoOYQGqmLTv2mT2woqKKdmXmkp+3O82ZNIpiQ7zNtpONgoAgIAgIAoKAICAIWIOAC+ucpowIYiF043sKR9Zo8PXUZUdb089Qb
9POWdROjr3Dy/Q5zTQ2aYipt7c3QXtCMzwDFRcXd4lT4p4cC8tNtT0Qb4yOjtZXqUGc8tTJ42ZjlLnnuRwaLxS7FsO4cXEj9eNg8Vdu7hleOK4TYtf6bG1tod/+5nn6dMdOSkhMosS4eLrrzjsoiXU6+sLgR/JInu9V7WhkyO87cJhuXX5LlxJjuBeHbrRmeEYaM2ZMr2K0pueyL+YwkH0IAdID+n3BePVFBkhzUwuvCu1kriH4gy+XVkcPJbK2b/tEaYPAkJ0BdlAzXz9djb7tn36u9qFM1tq1H/Iqz4lGrOQ2rre3Z/cBi6iMGj2KRo5KoL27dSzjtm1f0IEDGTR79iz9MbnnztHvfv87NT5Ed0YmJXFQZVaXL6TFQWTHDUHA092ZFkyMoXEJQSw6qxuCtWLVCgjsExMEBAFBwF4Q6IvfZluZa0dHB2cZVKqyVshCwEoeGAgQzaBxoRESIDWg6XEzZziUFBdQ5pEvFAkBjQscs+CmO1Tpq+4IDGSaQEPk9OmD+vFAgED3A9bO9wk9ZTn0VOZK77yV/yiR97Ii5U9oWJzSN3Fy6hSGRzeYO8peIcPjDC8MKecsFhxnSICgTFV4+HCjbZoLEEjHfsPVXyiDlTRyPOuq+CuNEZTBioocrkqGgZRB+auiy+cVKYNMHCl/Zd0Jhajllzu3U1RIIMUG+1FxFT7XXVP0q7Agp7SKaquv0Ip5Qn5Yh660EgQEAUFAEBAEBIHuEEBg3N2FywX5O3Zp1hcLlLt0Khv0CJg+p5mrLqM1hjbyLC5nte6jj9Uia8Qz3/vPOl4INU4fPwTJ8No/X6bd+w5QSnKyIjymT5+hJ0nGjBnL22Ip+9xp1a25GCXIgDff+Tc/O1jOlujpFI4bP14/Dua0/uPtdP/9DxmRGxcvnFfboZW8d/de9UyxaNEis12jD+iXaLrNZhuZ2Thp0lQlRYD5og9UClqxfIlRTBaE0caNG2j7dl1mPbqZNnsqpaSMNtOj5U2m59JyS/vYIwRID+epLxivvrjAmmZerF+/mdLT36HZs2ZSdW0dvfXv9+n1f/3TqHQVSBKNsQVTiC/eG6+/rb8w/PPV1/hB3o3uWn0febBg+s5du+mXv/y1IkcsWXh4BK1auYoy9mWosfCl+9FPfkg/+eFPuJ52imJrX/zbS/T2G2/pfXnw4a9wKYvJQoBYArWftru5ONLwMB/OADFeBSGZOv10AmQYQUAQ6DME+uK3uc+cuc6OoIMRF5dKX3n4h1TDJaVqaytUdgfEujdtfIs1NvJ55dBTKitDIyXwFyQHyI6338qlQwc/V9mZ02fcQlOmLNJnMJhzDSRCTEwC3bbyEc72WKnGu3Tpgio9BfH0wsKLat/EtDk9kiDo383NU5XAauZ7jusx1NGtqa1Siz3Qp4dnV50NZIpg7ppYe011JdXX15K/n3E5T3PkDcTgzZX1wlgoawXReBBCKIPV1DRXESAQYkTZLJS/Sh41Wel/mBOsv555D+Zjo8JCaezkaBo1MoEi/XWrBwsrm9X/hn93HDhJFwsKKcC7dysMBzN2MjdBQBAQBAQBQUAQuHYEEAC+UFRDx3NKqblNp3+K3rzdXFT571B/Y92Iax9JjjRFwPQ5rbsMkGFMVC28ab7SEgRpgBjj6/96he/t3ZXAuhan/Plzv9LHKUEqvPZaZ5msxMREunXFStZLzjYbo0SlnFf+8Xf6z9v/MXW1V+9HjkxSlXj++Ecd0XLyxFH67o9+QN/79jNMxsSoCjt//euLhO2azV80lxdQperfexjodwCXjEMZdPjLQ2pROrJhYmNje/RpNC9Kv/vuFfT8853zffrp79APfvBfNCV9ssJg2/bP6P+e/42e8AkMDKGH7nnQrARCdwOansvu2trDPiFAejhLfcF49UUGCDIv0ieOo82coQEDSfHEE0+pgAdWS+IhHV8g1HXTylOV8xcdmSFalghY1Acfupf+9xfPqzY45vnnf0t/femfqk/UxEMQAxcUSyQImPRVq1bRti2b9b6A2bznnnt1ZSWu+qLBCq2SR77yVRZV7RrI6AF62d3HCDS1tFNWXgVdLq0z6jnQz43GxocQCBIxQUAQEATsAYG++G22lXmCkAgJjqDgoHDlEn6HK6vKObtyqxL83r9vE4t1c9ZDSCRnfkToBcYhIh7Mx0HfC8fAgoPDrBIvR4mnqUyUwNrb26m6pkJlN6xb96oSGAfBEBoaS8Njk8yW0VIHsqHklUZGIAsFmiLWGsgMmLlMEq1PS325coapNmdLbazdDj+ALcpbgQBBGazLRXlKpL20pFBl5iBrRFdWK6pbPKwdcyi1iwz2oZQID3J1cebs01YK9dWVSQv11WUp+7k70mk/VyZAhhIqMldBQBAQBAQBQUAQuJEItLR10JmLFfTWttNUVccxMc4GgYUHeBDiH0KA3Dj0TZ/TussAQduExJH01FOP0DPfzVYxSmRpPPc/zzHR8GflJOKUWowTMc+77r6XF31N108A8c6Hv3IP7T+wQ2VdwMzFKBErBRFwrVkgICme+PrjirRA/5jXhrXraef2LyzGQp/6+hNGFXdimCjRslVwPBaWL7xpEcdL/elnz/6AHnvs6/p5WfoH833k0a/TkZOn1PgwkEdfffgxFR+GGWIGYfnvf/87tHTZzZa6tLjd9FxabGgnO6Twfw8nSmO8rudBuzcZIJYuDpGRkfTdZ77PqWCT9B7jIoAvLy4SIC2e/Z9n6a5779LvP/zlMV7JmKd/jy/KE088QV9/4mv6AAnGQx/aReBrX3uUHn747i4aIoYwgZX8ze9+T7esXK4IF5h2ocJfzeDrH//4W5o5c6bh4fL/ACFQzyLoO48W0j83naAX12WqF/7//MtCqq7vWpZigNyUYQUBQUAQ6BGBwbAaBcQDXtDkgIEIwQvZCyA6ZsxYRuPHz1OLHKBBUViQY4RLeUWx0v1ArVxfFimEHT16gHJyTql+TQ3jGI6n7UepLAiMT5o0l6ZOna82F+Sf5IzOi4TyXJYM/fn6+FJERDTf2HuqElyFBblcnrPn3xMc29hYTzU1lWbbN3NZq+4M2SbIxABRYi7bw9yxKOtlzgA/MkhGjkxTZa5QBgvzgBh6LpckQ/krlBYbmZxmNoPEXJ+yzTwCIEHEBAFBQBAQBAQBQUAQ6G8EmlvaeCGG+XvB/vbFFsfTxz0t3C/3xmetL8MS/t0dj0XWq1ffy/HMn6m4JswwTqmRHyAvvvHNb9D3n/mmEamA9siyeP5//4+mz+wkRgxjlCABnvzGt2nZ8qUWXWlr6/nzAS2Pv7zwYrexUJA08OOvf32Bn6/SjcaLiYmhhQvn6OOtmCf8xAL04uIyfvay7ikXMdm//OEFevKJrytSB6bFhxHb1TDDgvRfPv9L+roJEWPdKDrXW1osPw9aBNNGd0gGSA8nBowXamKjxpomMO7rrws0aIcGBvrpv6iooW2Y1oQ2yAAJDfExauPs3PUh0NPT26gN6uEZ2vwFC1Sq1xtvvK5Yx7zzlyhmRASv4pyrar5NnzGD3v/Pu/TZ1h36w44fP2aU
zvZgQeFI/5x+jp6ZE33viNAvQ3bzosmzbvkdKScgXqG7H+M8BZ5hMTuM/vapXr18/LW2+/hcmAo/LikRc2NAhCz9Ta2lp1gPTfmeMnbyk50sce2yPlqIWQlatP0lj/R/LLPcJ4PCp9mOh88wbO4XfOKgm+r37tq4uCILwe/+L2iHxyoUepuehRiInDzz1SIw0VNK5du1gtBgjlr2CHDMWWO6X1127vjZ6NDKxNBpYEQDisrXsbYEjtkp6BjAz2aD4PnIk/Cf08BsEPRggz8yVH0YkgiB5OykRlIwnMYcKugR9cFAT4wfAlusVtrpTIpEfcaCgWNeE9MDZAAnF43JKKJxT4UWAHEILnDB0MufDJB3I1BB3H7c0yMHlZCk+1y+AcYJcMkJLSKrWdHgQeCHiMAfjQLvMwtizoYSKokXNxd7kITAwAM00w67ZDGkmwHqWlTDGNoTGrYbyYyy7h+7rclhq3PwEWQ4FMhlDQN49IDABS5oWAFLi9srkKfcCnxAqZsKHWVmmDjBfZK4yiwgpIN02IHyAIi+4pUwB9AezIAxKAMNF1He+RlRLA+Fx1VuluH5IbN89JaOSKRLorJT5hkrLiZsUwcZUBTUAQRKgLVIvdmcTx1sSxTAUApuD/oYEgajU5aHHLyWRQPhlolfqoZ1oijdv7pyIKJNFDN7bnawVWZAE0vqZ3CMEPnks8dypwvHrLR6TW0qDWNZu84vKkJGoBaIL1HU6aiZN5ElTgB/0/dECFwAe9QkKSlmKLU7FAUqMZsXts0gm2ywTaCMQa1bDGYYrurrfLNUeftJ65MW10Xpoola1ltfKtonKJXzwpR3//yQzrwz4jY8XzmkyQ5ze3SHVBg9TXIJ/tA9oYHTVgfcTUhSXBD6dz/cidjI4n5a0Tt+Xji92YDaRdBFiB0uxvKZc/eaFFAl5j9os6QYwwMmBkYN1ngD/vSUwYWI8Ri47LsWPHJA+/zY8++rQq+C8UBAkGBvuku/uGtN/+AhKOGtu2KFAOb6sdkFdqvK+eIewvFB6Rnu5b0tZ2SYZH+tTQvB6/1NVvlypce7CwtdiYF9oXLp+cnFQylMPDveijTQYGbk/vE/soLa1XklFFRRXos3BdTA5YbH+W+x6BqsbGrSgU3lJm94/ue1Qqwcg1YvEMGODH4vlZjXfvhflhgB+rcYTWrg8W5/r6wBzIgh+6lKPdjvtVgNz3G5xfuz3d2D0TpGehf/OmXSj618jZs155//fn1E5vdBCE+6j/znz08RXIJz0vu3c9MUuO1DiPH47zn987DfXNCrjyeAvhv3YUA//doiAIZa86+kblfUiZj0VnFDlqSj2yq6lozQGQ1WKApCfT4jSVGKbnD8epbozyAWdgWQAIi+EEP/TitQ5+6IXrDM0VEKZCu7gTeRKB2bQefI/LWeCeZoSggDzoHJbqTLVigTDIHAnjdS14IDGYW/eFJ2C+Dd+KQszaR+25AFgLgY/JIdBBUIzmcwk6JAXGQbC9XW613pTEdkg6RWC+fqpbbJh5RFhC9/9wZ+mPsUibKjJU1RWDWAIJrCzbw4p68ARAAyt8R6wADRi6d4Y5PyZDY07pAuslOTImLaYdYsofhSQV9jnL/JgcM4vZo7FJuO0EZtXTyYQgyDRjRbUKtS6ASakCAEATAZlyjSkZrInEpIQ8ndIRSSqjcPpZcByB0k3ZrQCeJCKShvxVwmaTG91hlZ90oBTSYaDNYF2dHaP6znqbWNzwOMHY3Z52sFy6pdYfgPxVQrwCr4uhSgmbw8pEXGNuIJ/53ZJCTWkqDjYO5bwmXQpy4DlAYIOPNLmXgRkJLIIQeiTTpRIf5Z4HpTeWpzxBrrZp7+vr6ecRDe8TWfBDsYdI8egbk0RTgThRAzI1AzDp14ozBETG+b4TgBj+GGQmmei3gXAXgg4Eo3TKag3FekFnBXsH50axqUmq8qDFheMLXo9YKk3igEH7mcun5MKAxvowAfqi0fnzZdvkYEFMrv387+VSW98drI8hGJ8zaHT+g2efBlnJJu3BIbFEbYDrnCjyhCXcekviaXjkjEdwXEokmfX/YFForSM9NSXdA+NytWPGmL4AzJSKAD5jhoHhWh8eo38jA0YG7iIDcxkgc39n76Kp+77qrVtt0tM7AgDjKUgvaBMT5uskEgnJ5Stn5J13fiYXz3+M2Wmz2RKF+L1+FPJZL730J7J5M2WWFgZS5mt/7rIEpDOvXj0n77z73+XSxU8kGJr5LeC6dvzO7trzlLz88vfuKBDMbWu+12z/1q0rigVx+tS7AHWglzlPVFXVyO7dz8izz35bgQYbwReFBRTenNfVbwGodAnm6BchEVNhGKLPc/z1RQb4sUhyVuktg/mxSol+SLsJhYLy5jv/ppgfh5742vTvkFEwfkgPKIbtdvvBfnhR7cCbb51Qk/U2MmNRfce9/57kgh86gPfwHsUv58j17x1eMz6y9ymVBIIgVtt78vWv/xEUN+b3I3LbLTA8n3397MdrC879tY7VYoDYrA6xp1AvnHvztNYJMPo3MrAGGVjWJz8WhaxUOE8SPb2Q9tH8F/SxsnAdypGIJfgx3qExIpxuzcyc6xL84Mx5rm8L2GHFMCxdpi5xJp2KSZIcjgo8qBX7YzQ8NJ0K+lk46mjsDemEmFcSxaMKBDEXAxUpjEmqOyWfHH1TOrriUv5sofSDjRI4M6aADx300B/ZRnIkKG23b0hz8z6pAmhAtscEzMEpZ6VM0bEOAYSJNKSBuIFig5jRb48CVxhlHo1OqV5kIxf80JexIENQYm6YfXEFikzBKJ3sj6Q5qozXe63j0hW9KS/ad8ByxKfGRhbIzieflM+PHlX7FIynxR0MSwcKF7XQABsHQNDcvA0FhIgUVVRILIQ9AJUlb1zjs7APSoUJ1KBcAE4mYwlJ9MakvLJYOjt64MgelZijTNJh6F/mDwB80kc7Jv3Y3TGnRwrSVTB21wARngsOLGNQrmpPUSExC+UjojNJ6MeRX+AVRywJD47ZP0YEPZScFYLr83njEHI8hnGH+qQgNiADFTCI97WodZQROoLgid4+DdudPfky6hzR5NnKtHXIAGHTQ/ld4sm3i68APi6jMJRH0YizXvM8MRjW58mJMx9Psz7YNv0+/qbAKa6+K/LGWx/Py/rQwY+WmmeEfcoAACAASURBVD3y/KYDYgYLZiTr7ZFIaDMK+vr6xYkZrQFvWoEfFotFMUHoAZJOz9Au1Q6tQdgAxm1rLJTg+Awby27Nl01VfuSZ7BojjAwYGTAy8HBkIAmPjWg8b10OtrOjQ+lLk03B2V3zFYv4m0Tw4x//4T/KjevnwIyoQPF8m2KpTkwkZKC/U27fvowb959jEkhCvvvdv1UySyuVUyQIT3DiX/7lv8j58x8psGPLln139Hf04zcwISOudN1bWvYuuz+CHxcufiq/+Pl/AshyRh0XAh3FxQ2YqadpxI/BJ21oqE0BI93dP1Hj+c53/3pFYMu6PPAYFGfXWi2Vcu36NXnu+ecMGawFDpQBfiyQmFVcvFLw499/x5C9WsXDtGZdUTrm/
Lnz8tmZW8rHiiD8cnys1mzARsfLzgALyARBqIhAxuKWLc3wwmjecIA9i8uckHIMNZTt21+cvtaY75ps2ckzVlwXGWBthyAIFT9On/4E16tbZdeuXXecwxZ4fjTBv/YvXtqm5O71cDlsUo4JoGsdq8UAof+HL+/O+uVa77/Rv5GBtcjAsgCQUGhQgj0o4sKwOgPGRtqRJ2NDmr7VXNYHGR808NZZIfpOsYAMQSONCWJCwT8rj5XErH6P2KUR0ksOzOwn+yMC02+3zauYCnoo8MORBVacBFA0SaJIpEc+v9iKm+xyiQwGJa/ttmJ/2LKMj7lJ9TqskpzqU4ZgBBgU2wPgB6Ws3Hgk64P+3gRCdHkrSlP1Sodifzye3zTdJJeXmGcK27rfBlfQpa5yPUYot6UvJ/AxxY5cMGNP0R8ipRgmmc4xqSnV9PnUWCDB1bJtlwzevi6Ra5DBAoATsedLIf4GezulxBGRREO9uLfuljJfgXRPAlzpKZRJUB5i2J90Hvwv8CXPmARwQvCDMQjmiyPPLrHRKYzphoz4C6V8htQBXUUN7OG6tryY8vJgEPzwZyxSkQd/kt7bMmbzaSBFFoRQKyHSKPwDF5EabOeE6TsBDOXnAckvgiBOj0MxT9iLqbJMyhrhQeHbKqH3fyumqzck3aD5eLAtnTlCs/LR9i4ZJSukslzyUk8AFPlUGqo3S2dkQOs4+z8ZLZ1glxSnqxUAQubHNceYHL19Qq1hqtFAnD9PlcuB8YR0XTwhp79ok3AkPsvoXAc+uM3h7a/KgdrtmKWrnYeBqaT04di5wRwaJ3MIcmUBr1t8AGgYI4Ot6hHIC3Tcl/VR09Z/QP+7HRZ59bEGObKvblYPNASz07DECCMDRgaMDDwkGaDJ+NSk9nvGYj4B5/UQvOGmLjGjKFA8L/iRwd03Za8++ODXCvyob9gkf/TaX8mBAy+KBxKY9AHp7GyV3/z2x/L793+h2Bo7dhyEB1idAiRy9aoXupGfq2lNWSoyMwh+kFny9T/6n2Ag+Q01RvbXB8DlPTBD3nzzJ2qdLVv2KHaGGdc5SwXBHDJLCH6cPfOhVFfXyuNPfEUOHfqq1Na1iAOTTBi8Julov4b9/pUc/eQ3uHY7ptgfC4Etc/chdxxL7TffX+72y8kn+15qPfbpcEDC1V0IBtClpdL2pX3fAD/uPPS5E8ZWY8LMvYAfhw4anh93HsGNt2RkaFBOnbmo5BG3bNmr7qUW+t7deHu/8feIv7179jwtf/h9vxw/fhK/1fUbDrAfhxrD8ePHxZxfK7t2H9oQbNONf2Yufw8JyPK4Umr1ww8/wDUrJtxkfYv0VggwVBW7pKLozmvZvMUuEpc/jHtaczUYIIb/xz0donvaOJ7R7gfvqRFj4/uegWVVPTODTgkO9YgHJtL8yw1KXo0BffVAAoiPBEQIfnA9/TXX98ZcajnfZ5AJQkCFjBFKaU2wduF3zwI9uB5BCgbBD1fKLumURQNCIH9FBkj/0HX1vqXRLu3OKUnfHAAoYJXR2IzOH98n8KEvc6LwTsRYUvDJUFuDEpr1/eBzHcigvBUBiB5npwInGPU2jY3C5w56npitan1zOiIx06SU2DXwgtJXOvsjF3GmP0cEDBM3wI/c5a3V/TJyulV2O5sVwyQ+nBJ7UYFqOzwxpZgfjJG0SSx4bocRFPdnNMs+KCkphkk7wCcYEQ5B54+FhpA5KNb0zDhUA9lwTSXgA5JUIEQCimW2PgAi0ENkkOXBUFJXCIIJvbEyqXBMKRks4ueP1dXKr9qCcjwRlsez/lH6dpTByg2fNw4QA7NBCwH0FE7B8L1UmZWTKaKDIYrV4fNIY81WOTd8GqbcKQmENFaC8g4BgELwg74hTosmKTIV/XTa/Jz9qXXwqLNGyGix+yol6BuV42lIft2+OT0sSl59K+aQTbcG5Q8db8oxEwbIU5PSatnQwY/iykapsdeoXA0P3lZAR0GBH0BgiVjDAM8AfnT1DaLoFYSxWp3a2uP1AACBlBjADwdkMdZL8Lw0582eNZ1vNokJFwhGGBkwMmBk4GHJAEGG4GhYDTdQVKS+c9dDEExI4XrCZnUCPFj4EotM18GBbsX8OHz4m/LEE68CmChWRXbKM3C27bPPfgPynp8pxkRv7y1lKD6J33camUbHx6S4pHpevw4CEt097RLGRAYffseplzwKNmRvb5diflBWi+BHZUWtmi3H/li4pyTV5ctnFSjT0wu95MjokgUDjlcDc36lAI2iohLIafyZvPzKnynvEnpj6MGCC30/OG4XJg6889ZP5drV03Lt2hkFtuTn67ryGbAmk2of2m9fUbNUkzBwtYDC73JCIhQeImTDWPGa2uYMgkqh8LD0YdwFAMPIxEiBeUnApRu+HPr2xcUVkCbbPu2rQhkygj8pyFV6fcXz5pPt614tQ4NgLrs8qv/5pLu4v7pUGUE6q/XeZMvWwzl9v8aggYNB+d1vfycn33tPXtxfLEeefmxeA232ee7KLfnJr95XE1NefGybtNSASr4Bw5JzjownZiZ+PahdXQn4IUM3hcwPA/x4UEdl/bXb0dkJs+EB9bvA71ojNl4GysuqIdVYJzfbbklPV5c0NM1M8twIe9sP/5orly/Jnr3fU9ceRmysDBCQ5XGlf11b28eYONQpW7dum8UC4XXH0GhcOvpHlaS+NTtXivJX9WUe8c6RxlrtDK0WA6TI1Lzau/al728yD1VmQ3JsXZ4HC9+dzzNcAhpzg8ui8K3QQwc58s12MeV4gVAWi8wQAibSjbVRE56WywIjq9Ch/TCR/VEQLJBIocYC0dslA2ScDBB4Zyg2CN6PjUWUjFNFS6kU7CpFgfuEBACyuH28Ib4TBEnaCV5oSBxvZvXQmRYuU0bGM7MLwYOT+WB/jCv5q7Ioxg5drNhkWNwZs0zma14UejtcFoNfiAMeELlB0CPjnvEbAcVFknFIX2W9OiKJpFwa7JAB24B81fS82tTlgywUWDJ8TEM2om8gpDxF6AOSHMko/gslscIRmKR3nBBPZheW1ahtJ6wwVAc7wT8Bg1E3gBvgGSzck/2R8Y6IaRTSWXk2iWJG6GTSDQYIwAOwM2JRs5SbqtWxmRuuTD/8P7SlBDq21LdIRXvH9Oc6VxorNKCxPbh2NOFTbRO00KWz1Pso5BCooIeHYnj0I2+QzFIyV1l2EBkf0fCYYnywjcGe2+IsplyTll+dVVLgfgZtX1NtcJ1NjzwuGV+1TJnGpLX0TtbHN4aKZdvQpJR3Dsob10/IJfvsb6dc1gclr5qKGhUIFIu2S2xCA8BGx6dgRA/gDlIlV2/ckvbQsIzh/Lk6EpFKnMtJUJzMlmK8n6eM0FcqWzL3ONzL6ziMz0990SdftAenm7FbzVJV5JLHd8CQdo0vAu5l34xtjQwYGfjyZIA3NASeg2Htu6y81L+g9u9aZCWZnPFBm6//KfgxFZdUyiuvfF/JWzU375m+Oddn2HJmWyEABLe7FMX5myjgT6iCPv9OHH8HLI1jYFo0yTe++R9mSWNR6urGjc8Ve4QAy5NPviLP
oIDlhWHk0099XZmyN0Bqi0bnBA/0/li4VwAAgAMCIPxto4cVgQUdZJh/XzIokp2XK1c+VW/vP3hEDj/1mpQAaGDMnTHM30IyWV588fsKJGLQ7J1gkb7u+HgYJq0fQbP7N3Lj2ulZXiVkr9TW7ZAnDr2sQCO/r0iNj4DJDYzjjdd/DD1ou9rXGzfPQcLlU+nvuz3tr0JZrv0HXlL9E8QYHRtVzBfKce3Zcwhmmn+pgKjc0MCVEcyU/YWcOPE+iilPyGuv/Yd5AZBZGxovZmWAk0QM8GPpk2JyHuncpbda3horAT8irTcU+HHkmcfm7SQK1v2xk+fkR784Lm6vSf7mh1+XvVsb5113oK9b/vu757D+TYApm+RPXtorpeVV865LAOzv/vE3EhnNyF8d+DN59uCT867HhefOnZP/fPSX6v3XdjwlVT5tAteCGxhvLJoBgredHV0Ana1SU9O0oJTjoo0Yb677DPB3n75VZ89egjx4+4YCQCjh1tnZLdGYG5MmGtVkjLnXI+v+ABkDXDIDPK6lpbWYLBPAhJrr0rJlq+TJTC0vmZ6Sy7eG5ecfXJN4ckYCi/6n33thi+xumn29t2SH93mFVWGA5MMDJV0r5kljQs59PnyLNjduGpBohlo3Rqy3DCwLAKFReS7IEcjPaEwEFuNtGgjA90nhVuvxD6DGXBksvia3QFtXq6YTFPEEipQ/BeWvGKnC2b4ZqThknOCCbvPgg9uPGfplmF3Z78WN8UfS8/FpkV21Ej5zXQo6OyB/BaYEzMEZZVW4AAbYQvYHo9gUAlOlSDIO+4yZaAptwzCbkQt+kNGhAI5kz7T5eYNXmxlhCjklAtN2luF18IT22NxmKmYWKySncv0/6CliBejBR4If3CYO/wx73CXJAptcqQ9LBEyE0kSplHnxBYV2GBn0kQSrg0HgIzyZJz7z7MJKd19Crrf2SP8lzCR9AjccAHbom8IWvEA2zPQhwT8asjJ4I8F5spTDcgJzsoFl0WmeFKdpRLqDAbEVmcUHloce02wQyF71YMwEMSrhS5IGwyRg9silDHw4TC0oPtiRj7iER8EAwszWaFQDmqIAnIrZHtCbnoHZQANZIAyyOsgEgbCVimKHS/mLlGaKpsGPkUmcOSYgI0NYnytlmSAlngrJB/hByS1GSWW9emRcdA0q1ocud8VllLzah1R8iKLI3+usD30DPM5lfehgC/fLkj8GVkyhYnr0AZRyQYgtnsigiDIDhIUjYOZAXq0jEhbfpA+zbb3qBmI9xDhkz85cG5TXj80wYSwFeXJwa7nsBYiYJfKsh6EaYzAyYGTAyMCiGQgGwQCBFCep3V58LztwDfGwBEGA4qJyee65b6sh54GVp4MMOiM/DclMsjwikHcsABjiwSy3AkgpFhRYUJBqlk8+oYzUZTAuymYV7UeCA5oJ+cn34KtWjxv/JjBO4eUF+ZLDh7+2YH+8CYvFRsHCHdb6c2PSApi8i4EfbIxgRWfndcVS8UAqhVJdZJwsFQRBCDZQHqsA10H6JAF6iZw48R68Sv4f1SZ9SsgocWEfxpGPq1fPK4mujvZLitXx4ovfVf0y6PnF5WTDcj8Y27Y9Ki+88C31/PNLp+XkiXdkZPinStalKPDn09Jpum/Jo48+dwcAQsCqp/uWyjnHRADEDpDFiOVnwJC9WjhXlFHV2fUEdWMPiAWSC35UbNkmhw99RSx+L7z6JuGfNyExfEfojwMdN+RnP/o70cEPMj8IdMwX7310Qn76xmcK/Pj+d78mzfVV8647jnuDueCHC6DrfO1ev909C/zYD9bWQmGAHwtlZuXLxzHBLoTjVRQoV785RuF45blc71v68BmkZxk9KzdSJDGJY2h4EBLpZcbv9UY6sHP2hd9NRYESdR05MDhbDl1fNRKHzGswJsNQnGGw9sFIptben3U1GCD0/7CYZk/anpNG4+V9zgDZH+FMh8Ty+lF0vc+NG83dcwaWBYD4gKTqBXgHZrN3jcbUDabdbpfiHKNzJW+VZYPMBT9y9W35PFcKywkCgMui0b7p/UFvC3uhV8yFPuXRUQBZonEU1wl6KB8Q1MJd+PKKjAGIwCzGgi0+GQF903wVZxj2iEV+hg/yDvHgqAI9xrK0Aj56ppqlBBP54ynCFjgvs6wPyl3p8lc6u+NykXZBYAl4pB6z/wl+WGzDYsm4Zp3Pij0CwISPZHvkhs0yLlP5Xg0EwRsEOJIJ+Hngz+wql/Cghg7uGi8Rs1N7j9uzbE6/icwUDUldABeQqDlBFghlsI5dPCd/vPcRyUfRorDaJ8NdYTVTVBefoFY6952yWfaqlHCEobBV/JU2KZEKgBCaRFViql1MQ9WQumqScCwIwEPrkOAHg4CIHwAIg5JWl+CAPtA0LLZESslcOdFwkVX7kiW7ww9/jSALOWBw6EGwg0wPSmCN9w/hWKB/8GzYlbesXALJTXIJ/iKlTUVgqGjbUYIqCsAlN/Q2ndmm+dpcWwxYYlgu9GqzUXXwg5JXP1Ssj7h82HFOGZ1LycxxymV9sI8t/hbVFVkmBGgIwNTkp2Rg3C2dWO4Mw0AeQVmskdGIYtPcgiwZX4PPhHMLRapSiwSDfZiFOwXZkvXz7ed2FEzLr1ESyw6QzAgjA0YGjAw8TBmg75I+MaO8vFysuB55mILAQi4zkL8RU1PazRhlrvr6u+X0mfdVwb0ejI1mGJLbwH4kkEHN487Ob8rrv/6RKsrzPZpBMi6c/0Q+gscGr89Y+N+2dd/0hI/c/shqIGuCwf6GR4YUs6QdxutkWWxu3q2Ak8WCYA0ZFJTLiuJahOMku2Q5cincj/mYFpT3+uCD1+V2200wNY7IH//x/6r05ynTlcTkEbJbyPCgWfvRo28r9siePdrMcLbJa9NhFD0oi0ZfFZ0lQhCjpWWfMng/feo9uXDhlBw4+KpiozQ375Xjn76p9r0Nf5Qf02WsuI8pTJRpa7ukjgUZJJs37VWSYUYsLwMG+LF4nrxulwJx0/gcEtSNxe681l+8haXf5Yz+Y0ePyY///keYNGSXrWAq3+q4gj/ca+AzM4HvC/1xDJJzZ95+XT49dkqe3VMpXb0D8sa7H93RSRyT48aSefLWB1/Ijf4R+cbhJum48oUMtt+ad93O3qD84mhbdiLXJvn9yct3rMcFA2EySm7KmbZeea5hp9wYasMy3L+kxsRb4Jn1yPX/re2Yus8zmB/zpnNFC9NgIZHJSNlBIzZ2BvLzCxQbM5WeH+B8WPeeXkpj0aQqjBuxsTNAkJZB5vJkOoWJUTMTTynzXQ7/jye3V2FywcwE64DPLj7X2p8bq8EAgWMx5iEb3+Wr+SlICgC3zHVJZ+/rVrNvo6+lM7AsACScI2AWi0bVDSaDYIgeOvMjF+jQfUD0xzzxyxQYAYoBgg1pVh6001Qdr+DVkGt6znYt8FpwlvokbzIhU2abAj/IBlGACKSwAiUwNnpmq3Rv80hv6xnxgCWBaYKqyK+zPkoqaqaNv/Wx2hJd0LrcK/WbAGIAtNAlrTQ4RFuLIAXmDagXlL+qTeDLFTfCBD94o2A
BxsJ17AXQxEKQPcLXBEE0MS+9N/hATHklGULBHTfRar2Zt1Tf9BehwXqNu1aBIlMp7GfW+ySNC9DkRFZ7Kmc7S6BQqsBs6R4cUfrIoeDtnHe1p2n8CHDQBYECyTNrX3zMy6TNo3JiecouyWY3ZKXKxTcEI+9sC714rQWPEiSxENEhnZ8hEpzwCYVHxhuRk4Fbiq3RtGuzOAbyJFY6Jd6JaqnMtsCH3OeUtGLQ74PPKyFz5asqFm+39hjuHhJfabPI8XZtvazfh9oGzwlyRIdmDIV0hgnBFoIf/fCB6R/UGA46+HHIvE2+1QkmD27i/vFim2Zyvgj4wb4IzCh5LvRHwEZQ7MhEC6RDohJzzPyA3r51WVopy5UdJwGQmkIbLrrgjYMLrzB0J/OtERSa1n6Wgddpla8cqpd9LbOlAQpxEcD3jDAyYGTAyMDDkoHRsTBm8oMBAmp3IBCAVNTDO7uJYASZG5e/OKVAc53pQDYDC+4EMjZv3jNdlKfsE2Wm2m5fU4wGmqlTAoCFeoIHwdCIYkYcOPgKLok0z6y5x5V+GZSM6uluVf2xLcpNETigJwkN2ZnbxYIz7ygnOgZGJMEUymeRJboUa2ShNifA6Lh+/bzyBSGrY9++pxT4Qa8N9kVQguDEE4+/pMaqAAsAE9u375/VpMVSAB3oJ9Q+6CALwR9KudQ3bFYMkiEUVTmhIy8vX0mAlJY1KA+TG9cvqO10CS82THCI7JEUAJiq6m1Yn+bwuVdyC+3Rl3u54fmxvOPv9fvhSaOxmIb6Q/g8adfJy9t6eWtxRv+58+eUN2PT5hYZvH1L/TESfhvY4Inpx+GJiJz77HMFfuzaQub7lAIlPJYpBXjoj+CuycWrrUqOl+BHbRWv9hde99PP+xX48dLjTaqN+drk8o5uSLX2hWRfQ4UcaNAmIxH8YMx9PNUG+VvEi837DdkrlQkjjAwsPwPqdxXMUsYE5LiNMDKw0TJAo/Nt9UUwQndjkoE2QZr7aLVA9nUd1D5WgwFiN2nXFxvt2K7n/RkzdUPlB9XSjYUrr+eU39XYlgRA9MIt5ZdyAQ8+J9hRClCAykYERvT39VmZumm67h3icoCCNqD9wKp13FYU/DELMDAzDHqAxDHTj3M5Ha4iZVY0HmXBeaboPB7T2CIEL7q3w2y6vVUCZ8bUGrwQ14Nl/Oqdj8qVxB9QtZ/JSweKA86RSwBAnlYm55L0KTbHtOwVnrPthF+Tv+KWLTG0BjyArJOAzSTjYZvm0wFAgxCIzuowu9yiG6DrJuf0AJkGPwCSRGCW7rZpwMm5opmBuSCdNGWG3BhmfxVMRSUcn5ICFAQmhvqz+6VR9riPNCrzYZYmwR4yXngT0nrrmjQ/8oSUFbqkbzgq+RMAPsYdMuUaExtkJjhDimEDKNKDp9F4p2TawXBpv6GWT8ec15niQrBCtHHyeevQ7PX7IIPVB/+VclMAIEGh3NRNPGa3KmTRzOgsISdKcwmPAJjUczxuraqFfJTGiCGYsQuMEHqAMDoxXltwNpMiARaKCedLorxAATH9YJXkSl4R/CjpH5S3j1/QvD5ygA+2qTM/PN1oBwymUce4kuAaimIMwJ3oOZILeOjLCbzwvWZvuYRMPWC+VKgxMgh8pHHc8tfZzCkbfuy31QXUn34JgI/vugxzVuotvwC5BJsm95EDNpYZOTDOA+23NPezMDWV96WQqqDReBcMOznTn8XuqsoqXH9ov6nr8gttiUGRndDW9oX85jc/VkX9OH2xEGRUvPqVP1MFeYIeehBgoMzUyy9/TwZg4H3p4icAgbQJGwQPKBv17LPfxDrV84IRBFyGh3vlnXd+JhfPf6z6Y3G/vmETwBYCJ68qk/DlABkEXShbRbDEikkrOnNiiV2+420yLWKQ1qTZ+9hYCAyUvYrdMddonO1XVjbCr6RMBgbOYf12XB+NzWKrUDKsrm6zeNyzhR05S5DSV3ok4b/CKCqqkC1b9ygApAsMlKHBHiVRxv2nFBmNz7u7LitQZkvLzmVJfN2xg1/CBePjEfn1r38p//hP/yzPbfJLobtKKG00XwwN4Drt2IUNb3g+376TrRUIaEAlwcso7qceRPC78umnX5HtTz4thV5cNyddQob63Mcvrl+XwRvXYDxfL88f3LboUAhY8B7klUO7ZXeLNmFqvg26MQGpo5vMab/81fdfnG+V6WWFha3yxY2QvNBwSL5/4GtqeQ8milWqO8PZz8kIef/6aUxCergYgIsmwHjTyMAqZoBqEUYYGdioGSDA4LQVqL/1GA+aAWJDLcqW9hn+H6t88BOZ8DTLfpW7NrpbRgaWBEDYRhSFeYIbBDkYxWX+ab3a0REUxu3Qjc2yQTh7ie8zlHF31ieEJqU6EKKzRMgIKSpMAgzwTrM/gvDwID07GI1LnWpFCx1MyMsbFVPEKhNgSPSPdUB+qFUV5wmozJ2rSJ+L6scbpOvzs7h0hrY1wAIVBA2gd6nLXFG2inJYfK37b8w1OC/z1Cq2BkMBGDAnzw2uPzQVlMqsf4c+Xq7DsTL0tmmWTqYHmSSMaYN17ADBjyi8UAh88Ku6v6dTmZPN9f8Yam2VZNbbRLWBG5DPPjkqTbsPgvqnlbU1BghuN/Ixu8xvxw2EFpQWKzQXwwcERf6gBmwQSGDQ14N+IIzOdIGSfYoOaeVyshxKgv0yODWm3mNU5BVKb19QPQbMWG9oRMlFSWgAHZeq9vQw1yLnHUOKLUHAYhCyZXxfrY/g87AFM0kBdDDI5Cgexs0gGB8EG6onArAA0cARvuZybsO2LkSvq21yJa/I2mkFmHMUcloVCxidVwU96nxLQD8+LiPijaGIBrLM2Aik0uBNw7ERdEnAfyQKTxLlZ4LjM4S1GSFrD5YHpBw3X63Dt+Q2xuRq2ic+r12BIDEwlvrG77+cger8LoOGcCNjCRkdnw1Hu+BNU+J34LzRALa7bPaBrE7Ao6AgDXArJQX09UHwtR76c2OZkRfj3Jj5nKRS+ZjFt/aU7gfypZDT6DCYHx3t7erisrq6VkpKStbV99fd7j89QKz4DSkprQKYADAiFVHsFpp3f3rsHbxnk8ceO6LYHDooQWCArBCyQ/71l/9F3nnrp6pbMjiee/5b0tS0fVGWAmUhqJtMnxC9P/Z58uQH4nQVitPpngYB7nZ/Vrp+DLPTR8C2SCZTQl1yJyS4CIzk6s/zNVkmfJ/rDY8MQjIoOgsAsUMqzO0uVMbquaG/5vv69Szf98JfhbJWpaUV0zJYjY1blexWLDYGVsqZGSmyZkqKbfzP2EqPYe52EcjUffTWbyWF68IOn0le//DCgs12dcfUNfp3X9oqLTUzYN+CG2ygNxwOh7ggX8tCBYHIUAjXwbheu9/XZJS68UB+tr5uq+apuEAOEyMZ+T3es9us4rPD1xDAyXwxgc+fHZ6K9P4o8mBCZru/0wAAIABJREFUnHexzwV85tAeJXsXa5P9FGJyHNnqBDfcYP/zS6AlM9N2ixLx1cIAPuY7MsYyIwPLz4DOAFn+FsaaRgYengwQYIjEkjLKyan4XdWDk0ILPf
AXxuNaxoNmgND/w2Na2pdvLXOw0fqm/weDsqZGrM8MLAsA4dAJWujeHe4E5H1ww6kC4AdDN0YnEKKZnFvUDSa/V/hebugMEXBHxBLHe2Ay5IZnKqUIG5lJDUghmKBYFFwGZwxYn8qlEGSO0LbFiwI7mAX+78N3fXT2bD8r2AP91ZiV+ORj4umvE3OOD4W9phhG7jYYolvFnB+ToRRmT+I8tZs0kCOO93ohCkVwwu0ukdiwttwCiSdGLkASw/bmBTR7lfRVThD4yA3KXzEavA0w/EBtHcX1+ECPFPi0facsmMZqmV2c5jL+0RuEwRsQymD19/aKt7gKX+pO6HrbUNjgDHqvFJXUY62P1bo2aP+Og+LOYEG/MN8s3eaEAj6iOByDg3mq4E8Ag8V9en2E4H3BIPjBqA4CrUGBRDEiMHFNAScANjLhKAon4OKAKULWDmWjdCkpgh962PpSsqP+gJLCqoAUVvhaq/IFod+GDpoQWDE3NSu5NAb74riSGBoBm5JHNyvWRx/Aj1zWx5Gy55Vk11GwUiqujcDhRIsajKkTYBlZH2R8NOGGHFNJpbcLYFM1pMKyDBl6iFh4/41c6OAH+0xmmS31NsxktPrkOvoYMQXFlsmT3lgn9ntEtmBmar7XD/ZHnnR26JBTdgBr/DA6npR/+QMAoc9x1iU1Jo0dH9BHN5fLD1/dKqUAQdZDsMClR3IMQBIYIAydBZI7RmOZkRfj3MA5gO/5XDAw9zOyEZ/39PRI2y1NvqWxoUrKK2YYeA/j/hLUoIxTZVUjivpQjoUZefvtq0rOiowOyjUxDh/+6ixGBFkhex95Ts6c+Rj6/m+qdfYfPAJPDLBbF5C+0vND74s/+f7/rsADyli1374iJ2Ccfhp/P/tpGzw9gvLqq3+O65/FqfMEAwjGEIyiFBbbupeghrMeCwENZIHQB4R90tNjvj7nY6LQ62S+NvNxDURZq9q6HUoeS5fB4n7R4+Tqtc/VkGpqN6tjZMhfLX2EedVPBnlVWam8sr9atjQ3SaVfu1/oCU2o57mPV6+3yvsnLkt1RenSjW/ANUqKSwH4uZSXzuDQAO6fcJ3qmZlAtFq7bIcvHO9p1lXkIqC5A8u9WFxXAzYGY2Tg4cmAwQB5eI6VMdK7z0AyPSWf3wrK747flFC2lshWKgrd8u+e36xUMdYyHjQDhP4fRqx+BuKZGbn+1e/d6HGpDCwbAGFDlAiiufktsDwYOtNDl7OiAbouhUUmCMEQinSMAFgIZJki3E5nhXDdgFcrunp9tlksELUePrMTaIDgx0Q8CyTgZns8HpETfaelDzP7GeUVLO5DyalJQ3EHbANSmuBN1KBcbEeBnhiBei+gyTDh2Sikl67K+6xqa4EJRQQ6dMAD/AO1nMsYuhl6dm08QDoKUYHGYyUOGJlrTIpJNHjNqRVm1AoABxyDuTcTWjGXXhexIq1z+n8ETLUw0u6Ri8c/FrJg9u1/RHwAghhVJfhyhtfH3NDBD86a84AcHsbszfHBTglA9oI+KYmhNG5AYXiOFHvdZWKr0HJdWFgoCc6m4i4CcAkCyCB9hmAHWRVkP/jNAFAyMC8H3BQyTYlzDM892hgyg04JhoOQDYtj5i2kO7AbZEpUuZBjhceUSDdYE8o7A5Hp6Uc7cTGH4zIJv4mstYpEO8PI3xZJh8Hqgd8Gg9soUCXLSCHAUQtgZbDn9jQrI9DiV34fCvyA/JYOfmwtewKOJXbpv3xZBj87roAPgh4MAh86+EGWh9sBQKm4UjE54o4Rieek1x4LKEBEz8OQdRwKXS4FipQUwxTQb5ZgtwYg2MAKIRuloqhG/JlKNWt2YiIs4+5KmfJYoZccl5p1Mls0DZmVeCItbX3aucu8FEDHvL48BrBsZlaEStg6CYIfk5DfY6TTVjX7mfItnDHBi4bcZfkAWtNpSpbMrGcsy0CaxsjLRs4BPwv8nHyZAJC+3j651aZJ6bRshcxhiSb/tE6+tlY0DBbsS0v4q6UxHurrWpTU0z//8/+tTLs/Pf6ubALjo6G+eZoRQeks+lhQgkoPghAEUPjeQoV6fo+yv0BhsfpjfZGACEEAxu/f/4ViguzY8SSMw/cu2A7XVUwR+IZRAiscHpJwaFAyAAqWI5+l1y8Xqm9O79Q8Tyi3dbcxlxGSuz3ZMFu27FGA082bnysZLL8vAObHDSV/RWP4HTsOqmVGLJ2BnHkMUlnskb112gQlsgZKvdpznTHAxzGwcb/MQRYb/X6uX7sil69dUyyQtQBA4phwZoSRASMDX54MGAyQL8+x/rLuaXAsKudvDkoQ9UTWPRhhqGFE43VrnpIHyQDJBwvanvGLfWrxiUxrnoQNNgAaoEezijUbbNc2zO4sCwAJTc7cXBPs0P0/mAUd6JhhdWh+IDRK12WxCHQQAnCgOJkLfnB73bNDMR3g/8Gwx1Euh/RAaiQk9PtIQkKKElVq/bS2zmPl++XXWQCEzA9ljn2lVRWtrR2j0psj1zVWNSnmq5MyucWsZv7zNYvg5S2bVCFdDxtM94LeUfBL2qfBFSn3KG8LDd6YXnX6yQX6Y5wJCr0xGIrg3z6znvK9mC/+f/beNDrO87wSfLAVUFiqgMK+bwT3nQRJSaRE7ZYtWUvsxI4Td7rtnpnTTk9nOmfm9PSv6fzImek+pzvdSTrtHjtxErctjWVRkq2dlEiJ1EZx3wkuAIh936tQ2Obe56sX/FCsQhVWQlQ9OlShvuX93u+tb33uc+9VzwtrRgEMs709dXLsow+lu6FRWR0ddfV66tRdA9MFkV1epuNC0/NQMQB2QkMnWCDQDy4eHYJZPJgf4xbzhsbxyc5MaGbfBplo1n0TQANBibQR+I4AG2Ein8FE/8SUJc/F+b4Rp2QVpkh3PWQ6QHcfxfI8JhxxPC6s/W7JH5cySFEwsrLwIgcAw+/KVTNxhgsHQMrqZLBLALQETMz5WwUH/TbSUmEqjhktkBRj0PtDwPygJJULrA/L6NxifcQB+mHQ64POMX1v/VryG3slpczql64fYH3wb/7u3L+4KcxH4mag8ZauzxhtmZTkIvwWNkDEfuwT7GD/G8chvQVT9IFGq0/cHzJAWtBGUVmZ+Cbrof92Q1kzaUSfVkikoLKvsjhDHt1eNqNHm6shNbKCtDHj40ODMSahxocFJszsCTb+PTERp3IpJjiN0rbBy93L0xZ7DKJtL9rloh37xW4v1HaXYxuhtruY0wwQCAWlr0yMjo5KExggt241SGpqGiSwSuHh8OWTzJmYmFDTcrI0U3CfICBhBwIc8LGgaTeT8mQlMAnf3t6gAAjDGKcfPfa6yjZRCozBBP4nH7+lvhbGAJzTo9kefUWY5D9y+AC8RW5Ic/N1BUBmCyeYGMXFZVq9TskumpIb4/LZ1mN/WttuKVhTCPN0wzSxAxscn+DgGJHxQaaI+o5g+6HYHsHrWWMQmo7ONlNxT1+DfaWEmO4HxjQ3rxgJ6ZMqR1YBP5Y1a7ZFNIYPtd3YNBYzjamUUjg5pa/6GJXhGb8Upt8Xzp+Wa3VXoU7bK+Xl1jm9nGNDBkgsY
iMQG4GvzgjEGCBfnd/6q7qnVEXZVpOnDJAUh5X69KQ7Jc0ZLJ6//CO01AyQlDhUYsciNgKxEZgxAlEBIGQDjKNifqAHQAaABbvfB4EOhplmvnOa0VjmNJr8WUAIMjWBNvg9KTd7RmKe69FUHQvDJ8OJSu8AKJJkST35B9vgtQHKAZ7Ryfwg+LGuoEY6bl6X5pFemYI8Vx60aJvgPdIKdHesc1LyvFiXe1rHl99xyW9OFkdRjuzMfUI2rlpreXTgRXuoKE59OujxcSj1Yzl97bjsGaiSTTVb2S1xpblhcN0vgz02EAJkmF5nnqQlusTROSL+3FTJnrDAg8ZB+GDYFK/IdGB0Z42pF8a5sXrdhwl4bHxw6Ljc6h+RTOwXQSEDfBiAiAJUhqximB9sizq5JugDcuPcRdm270nJgIE4YBCdRTaIIzENv5ElY9XV2SQ9MPZkeDI9CmiotwVRKoIgAaUqA1D0NHdJf2+PemUwCIJk+dMkbRiMIAAlRZCBahH4gqgPiEuGwf6w5LTAGhlGFVuAAEPmiOp8IQzYQvCAslbZOTU6nd9zAWSM5FrVnWR4FPHHgxxX8Z5a9fQwRuelaTVCn48H0/KltfuSNP/4DfVS7y+zgBwCHwxjdE5z87RRMlucCpQFh2/ViCQDIKEROoESfhIwI/uDf8MiRINm8mTG2ON640UFVka6AOCAsUJADvUFgc/gLd2d7xmpDvnGfVUAQGa+VNMzhvNWStDIebaYS7VwqCrke3laqHFbyP5G2160y0Xbl8VuL9R2l2Mboba7mNNuA4EW2B1qn+61aW1tbXLxwjmAm5b/R3VVNRLgK+f6Fc14M4F/9epZ+HsdgnG4Q03HK8KwJjifYZ6nTPuj8Jg6fepDOfz+r/X56lvf/mMFVF579Sfy4Ye/BniyRvbu/TpM4pMxVhPSCHPvzz59A8CBX3bufPQOZoe5rtJvhB4ZfG4bDZiEz7ZPBA4qKjdOV69TjmvT5gekZtXmWVkgBD/eeOOncvPGVXlg71Oy/6FnFdCimTvNmskmGYa5eajrPVkv/X2QqsRyLngncL1oYjYGCH1YSkpWS/WqHWB9HFAZrBIwcC5dOqUm8TRJJ1AT6vyNZtuxZWIjMNsIFBYWSlmBxQBrvNmioBsTI0tZHRqqPzEGSKhRiU2LjcC9OwIxBsi9+9vG9gxFvYnxsrnaI/meTTOGgx5b+Z67X6S6lPf41Emov8RbjNvYsRAbgdgI3B6BiABIYmIiErpOaULFfxwq7VLBADEm5pYEFsyoA9JXoQbWACPh5rkCL67e1BTpAXuBYWeT8HtyIrw6siwEk6dxZz8YAYj9rYniK8iWsd4WsBR6NEmv3iTaptUWQRADGBAsMEboXLa3/6J0jxRKOhIEApkiQiyKV8DsPCPPI1ulVir8BZIbbyUgqGzNv8ex7ORAqjiKUWE+CPPNjDQpTvbIRIUFMHjy84XeH1zWkZIuzgB4482/jYacdTdLv7tSKH+V2jAgDQA9vN5RhQcK05OnmTHsDvtvgBB+t3t/8LsJ7lsvTCcZLiS0e3IgOdVngRZJ2TA0B9jUmzAg14dQVZmAxAGACW5RwQ1INak8FYbPSFUp4wLLNHm6FPzyAAAxDBBuA3as0gfwxJh0TBXg1wkYphs5LYIZ6R6wR1DNpkALwBMNymMBbOnuqpO0XvjGQCsvNS1Jt319AswKgAxFcWPSUihSDeBjEBJj9PRgUPKK4MdzowVyn3dCTpx+S5pvtOo8A37Qx6TTe3u8K0bzpZegGnbQhT4R1OsFCBLXg98SYAbl3Qq6ypX9QfAjP2kV+nhN+ksIhljMEQOMcDsEUAh4DOAlVadb2l+WYXsxzOJdRepnQjBkpUQcmBPJqO5LCCoXJwDCeSslwjFAVkr/Yv2IjUBsBO7eCDSAJXns4y/UXG7NphokrKuXPUm40L2nFwUZBgde+bE2ZQzJ7X4bTH7Sf+LGzatqjOzOLFCjdAbZE9evX5Q33/y5AhWPPvZtgChfV2YElyeD4513XoQ0WIkCHZOT48oeITjC5yzf6DCKLyog55QzPXYEGgYG++Vm/QUZGOiVyqoaSFtZCdnZ9pcyW1VVG2Xr1ofVKJxslXfeLpekZ1JUVsuwM8jO4zbY9+6edjl08MVp43ayXBipzlQpKqqG9E+WsjCawEChN4q9DT/GgswUMlS4XEXFal0vmuC4hwsCGzkAX9as3qw+KJTByskpkIb6cyp/RZN0MnViERuBpRiBdDznl1dU4Dz36Pl35epV2bV7112RwUodjx3nS/Ebx9qMjcBKHIEYA2Ql/iqxPi3WCBBgYJFnmjMJ7w23i8WY+4i3y0Ys1gbn2M5SM0Dm2J2wi3vjIZEf1ySuqZKYpFbYUYrN+LKMQEQAhCaGTHozM0//D4IfBrQgI2B8OFDRj+ljnd2SCvkAVioaJkhw1aJ9YGiO7s6CIXd8CpLjOLGQoA4GPyYn3UizQwYryEzcjQp7V7dTWpGwbu3u0wS7MWbnpwE9+uAfEAwYECggkNJdd16aYJReng/nCIAUQ1MWA6Q3dwKSV5mShKS7E2AHGScEOiZvjUo8EAGCH/xM98ejxt8KskYywB6hOboatqMScxr8oEloNpLP/ZnqZUI5r5ESp2TAK6Pncr1cO31G2SqsMmTlpT0M+GEHQfi3nflBsEAjtQsyYn5UlELbG0X0zWCkKM+jB206fTKSAbkvGKWnZY7JcAJYCs3WbzeYAskhypx1Y9zAoGGKwI1Ei4Id+OEJFoAiokAYA7svA5DjIiXFTOP0uLZ+ZX+EimEALIzkXDBN4Dsy2tkhaRXQKB8mm8LyB5nWBMNyBEJSkJwR+FXU5XWB9WFJgRH8oL/Lt+JSpfbqTTnhuy7nAH4Y4IPbIPPDT/wL/VTgA/vWi62orwlAHrbN4PE8FWCk8O/OQctolsd4eg33HNs3rA+CQfyOITDr0xidQcCEod9zuuBXYmO4KBOEkh0WG0e/3KUYwDF24nK7nK+fKehWnp8u+7eVihvA20qISAyQldDHr0ofKLPDSvtog5I0sQrpaEcrttxcR2BkZETOnTsH1kCdpCSnyu7ttUi+V821mbu+fDL6TnZBQWGlnD/3mbz33ssopEDRxbYHwWiwqrVa25rkwyMHNBnPqKnZDEmmUgURyJ54//1fgZ1wHHJTtfLII98GO6FUl3vkkRekseGqAhGVVatVxik3p1Dy88vV5PvWrd/CU+RtJPsLFTRh0p/R1d0JhsibOs8PuaLc3Cpdh8yISEFfjL17nwEw0aBm7IcO/grPXQNyHwzZN2zcPb1PI94RAAqX5ejR3+gyTPTue+g5ue/+p1SCituizNTadbvU9+SLLw7rd/qhxMcnKpDT1FwvX5w4DECnRTZv2Yvff5OasIcyQg/u92wMEC7LdtgefxcCMJ988q4CRjSVp/9KtFJbwduNfY+NQKQRSEhMgrzdKgXbKO9HltsgCpruhg/IijNBjzR4sfmxEViCESBYz+B9h/cfRjhfrSXY/LI1GWOALNtQL/uGQh3DfM76Kr2njcPj9EZLP1Rd
OsQ/dltiOwOASO26Ani4WjmcZf9xAhtcSgbIYu1Tf0KjtE6elt6ky1CAWSuFCVvFPVEmE/GjkjC5MnJHi7WvkdrhPjO+avsdaVy+bPMjAiBkgKSnpGhifAB7R/DDDnwQwGAQdKCcVbayL7KktR1GnDapKyODZT6ZZKa6M1kEEz19ykLw+5L1BXNsLGCOgfmsWB+kBwgMrxkEGCbK0yXrLGSk8L0b4MeQz6fr56PCkN4hBgjh8pkJ1sWOIAhBA8MCIQjixYttdhuYAwBACEpM+IbQFmSqcq2EQD48HxhTHTCYjW+WsYCdhyfXJ33W8a/zFRxBgp8CV84Ut4ySUYJgX5OdbnF40mQUxoIEcoKj68RJab10Uycb8INgCFkgDMP8sDNAOF1BnID8Ff0/zLTCCiRI4vizjltG6Phk1WP8kAveGqnSNeFC4giMCyTmCQbYpaAIAvA3NuGGv4cBLpQdAiYEzc+nCHpBoUr9MZAbTUnNkaTWepFij0pb+erIfLjNFqHR+ACWJQBBg3OyQgiuTHVwSwDVyDDyYh8CaBKZJ9nFleIrBEsEMmQ0tTdG5/T6+N0kvyRe6pADDSel3zc6A/w4gepgBhkZBCY82R4cu5XSOHxe+5sV2EZPZ9cM5hKra43IGEG46923LOAHU61xscaYfzPIGAkV9AnhmI7dmpKJLYmSQkDIOpxCLb6s03w4Bo9fxrgdrZMxPNjTCMyRFC971hfKno1FCgquhLjXGCB8AOULVKTgC9ZcH0znAlCw/bm8vLHflM05eeIQkqMWw2q2fWDl9u7dT0heFFXjph0zNrNVZnNZJi7nA65wfGgGHe34z2V8ZhuL2LylGYG2lhb5CF5ZTNDT82LtmnVInH/5qpX58rlq1UZ5/PFvQcqpDXJLJ+Xv/vbPFKBwuS0Jx472JvX26OrqkI2bdkMm6uswSS9EUrRXgYqPj/1Wk6WPPvYtqa5eP31ub1hfK/v3P60JfMpj8bykvBT9PR555Hnp7LyhANI//sN/wFi+KXn5Jfpj2be3Zu16laXiOtG8KPO5jb4fzz3/A22L4AuN1M+d+XDGPtGgndsnU4RB8OO5Z3+gAIc598pKV+m2ybw4c+qIjPq8Ulv7kAJEg4M9cvbc5zo9P79I9u37uo5jNEV8vBYwKBOmxSaQDQsVxSXVCjbxN+HY5+Tkgd2yW9wuFOMEWCyh1otNi43AQkaAlahr162R6qoSSOBektPnzsJzrhP+OsULaXbO68Y8QOY8ZLEVMAJ8lhsZGcC11YtntSQUIbqWFDAm4O3DdXyxt8f94D2CBQFNTVdloL9HpSApDelyQ0YHRQE52XkK2N8rz4sxBoh1CvMZYWgIRao4hhksRmFRxFLFUp0z9mO4s+MWnrma7/ljeLbfiKyP60198qsPrqrxuYmKAnj85qTddQBkpTNACH5cm3xXRuLbZHx0XFrlJFBhkfT4/K8kCNAkn+shVBK/6yu5/7Oda1+meREBEO4MAQp/NwAGJNzz8MLN6AuwAZjITvQzOQ6JpNwcTWYTjCDAUZhvvcg7vQ4k7FPF50yWFK+VrHAAKPE4kZDzZCp7Y2iEMlPjYE3AtwPF8klJMJAuTsEnHqTysiUZUkWjAEJMZdLEiS6pT2EGHXrVMEpn5BWVSUNARksnBAUNxEvQFkOBEOxTU/M1cYEFUhhIOsRnFchIHiSorlttc1mnsxXSWJnK/HC5emSwD7JZmNbT6QHbgtbXzN1nSmYyDLQJoPTUizujQKW1pjLQbySeyWAxIA7ZIozBQZiQw5ydoZrbIRggOjNMGDkvA+rUrKqRkqoNujQv+OljTvisD8pAfIc4RzOloHi39DbdlC4/x7pPmQxM6JtkPj8JPkw1W34f/B0ZBLy4bH+A7UPGiH09ZUQAr6FpOT1A6CvCoOQVgzJaCQA/zHROIyhimBTDWTBUxzMG+D9SmrpeMteuknb4gtCDxQQlr76FY2iHr1VOXb4ljS2QPAuAH8FeHwQ+1LcG/SVg44Hxd5Zv5kMMj1U/fGK4Vcq08bhzpDgUFOGxy/3rQR85JvxX4gRTBSCXmTbdscAfHDf1UUEQAKF+ueOzUfGHAUqC11+u786URFBBASwJ/+FngxmYE+fYSop7iQHCB1FWa18e6JIR3+wsoGyAjDsKa+b0wN0LHfyTbQ0R206Fyf1mmCKzEjyahCaPB74EsgKdMj2s1I4UTEjyJfHhh5+P6sXXjM3BGxbzarb2ncmJ8njNuhmmzrMtb+bxZeZsV4N0B1hf4dYx4zMX8CZcW7HpSzMCfEm4cbMegNxZZSVt37FZtm/bsjQbW4ZW09Mz5WEwNxxggxw6+LKyOdrbLbaH2Twlnvbue1qefPI7snXLA1qF2trWqCAA4/4HnlZ2AhklJtguPUUohUUA4ty5T5VFUQ2Zqvvue1LPTcpj0Sz97JmjM/aU29tZ+7CCKg888I05XYuYKGAfM9KzlHlCJgmBDrNPSXgeopQXPythKH7ffU8o84Pgh51ZwXa4bQbHhf0kGML7tGEXk6FB8Gjfg88J9zeAbYgTz5WcxyIa/m0Pc91LS/eoXwmfMR24XgUH2Sw0gr948Zj23zI/r9WEl9lO8Dqx77ERWOgIsAq0rKxM1q7fIIePHJMrl67JmdOnce5C4m0ZQd6YB8hCf8mv1vomaXzt2nll9lGasRQg9jPf/KeQDVz8+7PZ3oWLx8GCfEWBexYAPP/Cv5DiovKon2+DfyW2S1+tS5dOyufH35VLF0+pzGJw5BdUqR/UrtonFPTnvTfaZ+rgtlbK9xgDRACmDeuxe/TY6/rbszDkqa99TzZvvn/RgS77OfM+ilRuodDMnDORvNMiHTPcDxauffLxWyhEOYo8U/sd/nEs+NywYafU7npMWDDDZ6gv+zEcaVw43+uf0Hwcg8XVKyWWkgHidyCPF7n+MuxQUPaKzI+BqSa48d5OGQ8L2DSoikY5c9h1zYz5sETCrRNuesROLNICBINaEj8Rh98jeXEbp/f/bvdrkXbvK9VMRACEFQ4lGzfAG8F6UWRKm4yQZLAu+MlIDxj3GfCD08gUIcDBABwiDjAzmHb3ZLulxzuuSfVEJMoN+8Pns85QJqD5YsrI8GQri4HyQWRRTI2lyMjgiPQ1dUI+yoqUVI80A0AxwW36uy12RDBrgssY5oT5HIcW9TiqEr0ALxhDkLvqA/bhaRyC+bbFABki3QFBsINASFL8Tf0kGMJPAh8mugCyJLot1glBkGnpLiQTGWSF9GZ1yy0k1+n/wWR7VQ4YM0Clg+WvphvFHzofFYtktATvF79v37pNnIUF0tU7CqR/XIaRbO0dh9QWmCKOcYA9ad3S2Qi/islKAD6TktY2JMOZ8L4IGLYz4b+qZrX+phNZcQokEZRKT/Urc0IZQPRXYQDQUCZQwp0VlN0TA5IwYt1gjCcLlyVTiEBVXk2FdNTVQ04KAbCMoAI9QLiNwrU1Urx5u3zS+rnQ/FwKsVTrgBTcSpI/3pArGWDKvNZwxsb6uJ30MUbnlLwS3Nd8g5AdCxwkPZDlYBj5K2P87kjp0qS
KkXXj/hl/G4IeBFE8xeXKWuE+E1AxYJEBgPhJMIV7zOUyfQUKIpGVk5WQo+sYoEc7cRfDnZYsj+4slY0VFkBluuJB4p3zVkrcawwQgh/duG5lZ6TiHA99yWWCvqlzQNZlsjIZ17ooPa1HcMyeq++U6oKmKlxlAAAgAElEQVTwXjNeXD97hry67WwPKjZwTY8UrHQeG/NLc0u9Ss4wjDRP8LqsxvfhxZGfra31WpUXSSrGVGNzbBiedPhLAaTJRuVVN7ymGOZvAkctPYO4loHBBVm8aB/UuQ1WctU1AQRF+wSYQoUX956mrgEdnxgAEmqEVsa0XgDyH354SKWPqJO/ffs2KSiK7FGxMnp/Zy94HPN4fhz+HQQomptuSGPjFfXnYLjpb6Hm4qtVporAAI9pGnF/73v/G8AEv8pb8Zy2nxP8myyK73//34Bd8gMADihGwTpkl9FjhMBDReV6sEsuYixvwmfEen7i9vLzK3UemR/zqXzkOuvX71R/kYcf/pbuU6htFJdUaZ/CvXi7XZ7pcam/ecm6rmBcyF4tLKxAH9fp+nbPFDLE+DL/r//0P+n4cT6n2YMAEsEVjrdZJljii8sQKOI2OMapaagSxHhEc92csbHYl6hHYBT3jljQvjBddu2qlTd++47KYH3w0RF56qmvLSsAwt8h5gESOxqjGQGyMNo7WiF7+IocPvxbZc3xOY1JVzIWFzu4vZuQUGRy98MPf61MRvpWMUZGrM/5bJP3VRYTHTv2Bs69f5CbYF4SrGdBQDZkvZmTIKjeDc9HFg2wWOHUyWPyjae/r/cTT9YKofnPZ+exzleZAcJjqrevWz777N3pQpTu7g6pWbVOdu7YP88RDb9aqHOGx/CWLTv1nCFjfb7PGmQHUzb0tdd+qsdo8DHMXpFxzOOb5+rp0x+gkOS78sij35lTcVz4vVuZc+j1sabcI99/ct0MCaxsFCiX5JpsYvR9ZzGWD++WgwN4b0RxQnp6Bp6v8dI8z4jydX9erbNYrG+qAQofZfNan54flL2CHr76LpogsOKH7D8yZRHbtUtFGfmoobh2aOL0h/UTCScvZaYTmHFA5z7cchE7NY8F2HeOJfMdjjiP+OOs/Y+BH/MYzBWwSuhsnK1j1KV94nd+ILUD7TrV1z2lLIy4ASTq4JvRMd4vPW0tkrvaJu/Uj8Q0naMR9NAgwJDpBuCBSolsd4aMjFpMifgpnxz95DNoKc0cCT5oZGTkS4GrXGeQBcIgcnsruU9y2i1mQv7qTdLX2wlJrCzJAxNFDblTXOLIHkcSEZXuSP6HAgvYVkGJVa2fV7kGiS+YeY9OStIoZKF2rAE40Rmojwe4g1OUAAjneX24TPl6LTAVn5aaklcFqKYmveIdtS6AzskWBVToASKQ1jIm6Pz02uS9CobjJQGAUBvMzw3AoTuqMTntY8J9sCSxbmsXcnkGp6/PWyNPffd/lRNfvC6NMDDtbtuEBLxbgSPGMDw30uOtddPT3FJckyen4uphEgKQCcAGwYlMGLoT3KKc2CicMRgEP8j2oFH4gO1HMjJoBC0MEEDWhZXK1FU1ysEW6kPbCb4BAFbW2BD86EP7djN1AmfZNetV8uqVlrd0XWN03ignFAy59sER6aPhuE3yiswPA3xwHfZhajJL4pJ6AX5YxyPbVpAFn8m4VJPB0dHcPMNvxgA0BOUojWVnMtGrhGAN2+A4DUKqSw3UgSFw7BIDrCcCJPRG6Utp0/4zVCIM2JkrTOJ1esFl+iPFkSDryjyypnTmDYvSCwu5eS929+8lBoiRXiL4sbY8UxyJocEHZ3ICEvWRWRbBY82HaSd+19U51vmVHgcKN7yMpj9RbT3kj5MzkRWsgpvWl6LRUes8ot/Cjm/vlLLcmeAZV6o71iyff/K5rm+St5GkYpikJcOEQXCC/U/HfvCKq6+SnOex/h4CoNszFPFWpW3Zg9vwI4HJ9kvgc1MSRueVnk2R2DnBbce+L+8IUMP3woWLcujwh5rwuH/v/bJnz/1I0ocGtZa3d/PfGo9RAp5rVm9RJoTPt1/PO1ZkJiVBehPPT4m4ZhhAlMszKcN/JkKBpXyBLi2p0H/By6nPBZ57CJJQsoRhtkmTb0dAwjNUu9ONzfIH+5jtydV/1j5Zz4JmFTIpuE+McNtg/xMT07SC2LRh7yMB1uBrTDRjw3ZN30x/gvvAZaIZ41mGIDZrjiOQ7FhZLNQ5dn/RFuez2I7t26dlsM6cPC8NjY0AOvOX9Rkt5gGyaD/pkjTEBFxra4sMDw0iQZ+LgoAsFKRF9mpazM7w2fPq1bPy2us/VaYhgyxgAnezBa/bJoKvvfb1zHJmGW7vxMkj8uqBnyorkAWB3B4LbxYaZAoT/Hj1wF/LlctksJQru5IV8pS8sqS5Afa0N8jxzw+q/CS9u5irIOv5wQefCVkwYN9Xex/D7XfwPkezfrTjye0Ht2/6dLcYIN3d3QCVOsXtRiFpVhaKTJe3EI8sdBZ4/fY3P9XflEF2KIGD2SLaMQ8eb24v1DljjuHkgLLKbNsON4/MD4IfL730l3ps8hjeCZaSSqfajuG+3g5lOJGhy2N9ZPgn+pz5xBPfnVFQYrYTzTFo71PwPkezfrTjye0Etx9uPOzTE3BtLMvPkHzP7aJZzk9AUZAjcX7XzfPnz8vf/+zvpBrKKzt31kp5eZlk4RieDxhiuyRGszvzWmY+SXqu0zt5UxP+wUEGhCMuMnjENgh2EDjwTvVOAyZdk1eEIAr9RLISUJgNY3U7oGHWI0jim7KyrSlxVqEn+8Q0ZXncvqgYKMF9X8h37kMiJLlHpE2ZMeFkwAiO+JOQq579UrKQrsTWXeAIRJVVun4LlYl1VyQxI00Ge7oBRFgnQw68M0wQ6GB4wAbJKLFebn2DzRKf5Jai1WXQznRpFRNNTK0XexxA+JsACJkGY2O9WmVhwo3KSE8+qvltQcpaQt2QFFyy/DZ40UldXSh/0D8OVHdc2xvot17oWSna1vTejPXNF4IfmaDKOpLhbZJu9ZsAx1gyHUluhxdJuKRJn8QB5eweHBcfDDxN+EYsEIbfCRqQOcH2GCNDuEjHo6o+06bdy/FBe2SAjOQBagCpgB4jnupVuo4F1FggBcGN0oJsVCDUSEdL4x0gjq6AYGVK7f075Ymn/ydZi2WbmkjZvaUyIVWVFUj2O7QqhlWM6RnpkgZNVkZmRpFUuaukpXhMpWFU6grJ/WHIRTHIlKCsGUEDN74n18AMPWD0zfkGUBjvtTxD7HJY2oAtLCN1UA/BIOIYcay4naHhFAAVMDqvwfFSu0au1V+V1uHuaa+PJwsekxfgT3FgslHeiu+Us/Ej4gb4cc6Jqx6AD4YBPxT4oDcHcrMTfkiSwZeEYAX7aTdfL8nMlxzPiHT1tEgi+uBKJeRjLct+0qCdjA3jX8IxUG8ShBrB2z4piZWczXGxtkGwwwUvFLI99G+wQBh2jxWdcBeDScSO3hHpCJID8qAKogg6mMv9AhVuKO41BojZT4IfIWm3uGRR4mmuwao1O9OCknsMwK4zPvlloTJnaenpCn
7UPmpd4wKbCHwUy/kzaXN6EbX6HqotXD8BsKfiP/0cn/nAOnO7s38zLBPdf4xvyLHHvFEc/7FY2SPQ39crB997E8fZBWUiPbr/QfgybFnWpOBSjZBJhvBcNuADt2WmBydLgr+H69dsy3EeE/1kUAS/fM62XrhtBU+f6z4Fr2/f/2jHxb5OqPbMtGj2L5plZtvGSp83OjoKfxPr2SYHz5HLnXha6eNzt/rHQpSCwiIkXnfIxygqoATb0aNHZePGjcvGAol5gNytX39u23311dfk7UPvyLYN62XHjj1IxFWDHVeIJJxnye+LvGeQ6Xvu3Edy/doJ9Xmib1NLy3V55df/PeyOEMQYHBpQlkgqlAFC+YQYX4T+gX7cDx3TPgxcj+AD5RC3bHsIScf9ymI88Er47YXtiG0Gt0f5LjI/mBCm/9Xv/d6fInn8KFiYnmmwnquwaGD16m1gB66fBkuOffy21HBa+eppJqaROOoCk2BkeADPxoGCPLyDZ2bmgvkJdQvc7+3BBLZhs/AZh753ZNf0Q07Zvr6dORk8nuxvKPaA6Y99TO+2dBdBvHfffUd+8auXZFV5lTxw/149hougZJGNd/HleB9lgdr5858oE4KyUJTVZGHWi7/8y7CHDo+X/oEePYZ5fIb6Lbky2RjB4822v/ji0LQ3WjTnTNiO2GawT5S9orQpmR0EP7717T9WiVD2z/5cOY5jmL5pq2u2T4Ml9IJbu7ZW2bssJDHBY5JeOPZjkLKhbhzDhpVs759h03CaOYbJrqEk+DDOXwbzUDmQYzbHqn08w10T7P3hOcLCNm4/2mN4CsfasBf5rgEfWAy3C4lTIAGe7U6RNKgPzDW6UCz7+utvQx76lwrEUpJ3165dsnnTZs2/5eTloZDGyrlFantJGSBgbXjjUFw5D5SFwMWwZdZ7xy6kSZ4CFpGCMlnT/iH0QrXvLMAB+on0Jl5WSSm2Od1PLMdtq+9IwEOVwANjfAoqQvjbOZUl5bIvUheEbBEyWRgEUxSIicuSzLhySZ+an48J2TDDcR0K7hh2jZ3dQnCE7BsG+2pnz0TscGyBZRmBiFm3ifExGWisk4snPtIO0buBQb+D1oFuXDhAQWpPkrTSRBlAwrnj5nUAIMXTzI+x1BFpBQNkrLdl+gG+DBfnpCQAIN7haZ8Is7fGON2VkSlZ2ZkSPwEQAjJYjCFIKPXUXZPUsQHxVadIPSSdcgYHJQXIqwPt9SBB7kAFQUZGhlRVrZHzqVZVip0FUrO2Ssj6MMDH1bqbeGj0SXY2HkgAgIzkpar/R1HrmEBESiDEpMDH5MSdCKj2ySYF5ke1NL9T6omvltWrNyv7gwbrZIIQ/IhPteShuO4ayNYkTt4GkTgtERJSJf1T6mfizkwBAMKpYBsgAbkW1PjComJpbQF1A7H7/iekElrVeSVWVTYZNjSF74RxJ8chC+Mw6OLVxvqZeyD3leEaFn9HotzohzSFu1/yWXGAf9keVF/4wIAAMGAkm1bXVOJh1SfcLwbnERxgGGCBbAmCBoN0RLIFoSQyPfAja9sMBYgwPgwCFflbd0jSlnz56OYnOs2wPh7L2CDPj0xK0+fHZfLKFZFtHmnAS3o5FwrB+rAzMrShQLBfDAJs48OXYdI+KB5Umph/nMfjgGPG380EwRBoYYkH/yQgn2X22yzD/SdwRKCFwT6YX1YlsOADstKif8gvb3xyU949Xj+jazsBIv7wmY36ILAS4l5igMw2nlkJPQDLcPzgGSmuae70fT6odqDKIDjswIFWcvpDs06C14v0PSsLDyfggoUK6vrPpRKPfQ9nTG5Aj4WAH+yj/UE+uM869qRxcezBAInFyh0Bw/54/TcHp9kfjz3+da20utfibiTel3qbi9H+YrRxrx0rC9mftrY2JD9eRBV5m2zZvEXWI4lKs+0YGLKQUV2cdSmDtWfPPnnxxdcgVVInHxw9Ik9/4+uQqF2z5Ilt7kHMA2RxfselbGUSF0QWu3148Ij+c7lelI0bahQ4IxhSVl4OP5nSJQVDWMjo8RSB/fA76uXExPy77/5y1t0maPLFcSSA4UuVk1Og6wV7HnTjPfWjD1+VK2CXVFWuhgfWC1JSnKasSAIPRZBX3L7jUUnD+1lPd9us24tmJpPZrIinLFBOTp5KAlECkSxA3nfs9x6CC5S7ouzV8FAPcg3voz8z3+GZBCar4LNP30Bi/TMFbLxeSNCCscIke00NvMu2PyRbtu6dIena2taE8fs53rdHlXniRYL9k0/fUYCJeREG/Uf27HlEx4R+JwSFPnj/V9NjRSmjUDKu9DYhO+AkGDTpyAs88cQfKJgTCiyJZswWa5kh9P+TDz+Vg0MH5eVfvSpllUWyZ/d+SE9tQEIev/UygCG5udYxvHnzAwoMcJxmC4IYV6+ekmNH39Sx3PvAN9HX7TPGkuDHJ5+8I1+cOIzfqUIefOh5HW8GJTy/9tT39dgnEMDfb6HBPp08cUiZUXwX2//I7yj4YY6F4GNYvd4AIA4N9ysgVwypb3sQlKB35WefvinHjx+BifoNlX/jMUzZ8JLSDQCsviZbtz04QzqLgN2hgy+iuKJNj3H6sHGc6urOqvQWzwN6tG3b/oBKpNKXTkEhXBPMWD362HdCevnwvDp95pi2R+DpSfizcP1ojmH/+KScvd4jv/m4Tnpt73tFngz53cdWy4YKyxt4rr8DfXsJyPAf79Vvvfn2NBiyb9+DYHavVjCEUr2z+XjNA5uYa1ej9uuwN0z2BQGI6eRWYKYBHyJ1goBAh5y3QAwABuGAADJMxhP8MjBhgRQzD8bb3+wgAv8msENwwwnll3Bh+kDfDgUkMNgEVBIhdds1WSA5cWvmbGZu+sGxIbiTE79mevMEV6aBm4BkWAz8CPfr3N3pEQGQ8al46cSN2VTv09Baq/ohDTSAe/KWdTtFcP8f74iXFEhapThTlSEy2GQl6fPc1ZCm6pGW9g5hBX5RQRaqGtpVWoWMDYalGW9pbPLiyiguhr6zY1J6NC8YSMC3t0vJLSsB1wsE2NHfKpNjqTIe3w6JrXRpGMZB3WfBi2SeEERoOn1K2yOAkFpWLJlgqGTGT0gGHmIon7WuOE9uwKCcSf5MSFgNd6CSGd4cU33D0jdxOzFu/E7YFhP5/Md1zLgQ9CDLwRiyc57fB/krsAIIfvBvpOKlOYua1KkYMtJ33dLd0g6JpCxZDSbICKSq+vvcuBD0K3gw2NauniUEbdbtvF9Wrd2tgMqm7fC3yIZ+uCtf3HmQr8L+9E1aSc5GyJEd/egjqa3dLXlAoL3eJGlov6VSXtn5NeiHyOWmSwpUSYFb8vfdr+PDSO/jzXCbjOOT4S5dLfFg/DASk8Z1PoO3iqG2TkmfGJOhBAs5j4P/RX9rvbRsd0lpD1BhjIeRuaLJvQFByJggw6Rqx07pzhqTTwF+EPhg0Oj8DxNKZf2tj+UIjM7P3Wi1GB86V8RIXuWmWjXu9A6h3wfbc2E8CdzQ34O/CbdpWCBVxZ5poM2AVMagvQfHsgIi+D17+vvUm4bgB/eNbbA9zr/WcEOZH
cr84PFP5gf6RBCEMmEa3dZDahbYenYQJC03/MXZWnF5/j8ObdGe/lG51TEkY3jAYSThgT4zvUdaUT3iTl+/LFU3kfb2XmGAsOqKevLhwiTgOX8qlVyrznCLhp2eN8Uk8Ex9KztwwL+HViAH087OsO9cMHizEBAk3DaI4fSiUs4yIsLYxxggYY+vlTCjG88aBw68onrf9xr7YyWMb6wPX70RGIV+9cmTp+S1V3+Disx0WbNulezauWsGGMJCovnIOXz1RnNx95iVz1vAbrtv326VEzr9+Vl57+D7SDqVzZpEWcxeLOS+u5j9iLUVegQolcYw/mtMwLXhPf3wkWNIqt4GQzZv3qlV9dXVVVFXI4fe4sypTKhSNodAAYOyhkbSdLb1KbFI5gMToh99+JouavdyYsX56VMfIhn+VzqPAIjbxWdjqAlj3f0PPasKEqz8pmzVQoOJ3q6uFjW9HobU9Lp1tQqu0JtqNtCd1etPPPmHCkSwf9x/FtywPXqUvPzy38jnAC+YMCY7xuW23gEbG64qSHTx4jEkn3+k+2N8rFSaCLJETDSTOcJrNMejetUO3U2uS1+HegA1o6N+eeGF/1lBIYII3BaBkjVrau/w+eNzMKv4Dx95TT795C3Zc99T+htEkzhe6PhGuz4ToASNeAyfPH5a70m1OwDMb6pVMKQa+ZHq6upFB/SMbxi9w/gbhivKsu8HmTmUqqJxOX8LAlb0YjNgAxP1Fy4el1fATGprvSlPfeMPIPFlmDm3zxkeYwSmFhrm9710+SzYCL2yes122QXpK3rDhQse29w+gbyNG+8DmyJPzy8ew2yP4MeBV/4rwAwLnKmoxG8QOA472pvkzKkjcvXy5/Lscz/U88B44FBxhMbrHJfOTuu9lMdwGdhRgn9cl8cwwUaO2+9950/0mX5wECBk4HwpK1sj+XmFMxhSvNyRSUIGGM8fnqeMYP82s7+U7g+OHiQsT9V1yODIbcm8PqitDHsrghed13cmuckKIhBiB0N4H9+ycdM0M6S4tPQOxq2VtZzXZqNaiYblcw0CC5SaItsiOHjeZI6XS8LETBZb8HJkf3RNXZlmcATPt3+fD0jQ67gsheNbI/qQEJQIlvHi9gakSWW4uC/ReKTQb8QZj2t54AczbRAI4ZgQYDFslWDQaLZ9j827OyMQPjsX6E9i3KQ+dJdXVOjDVh2ADEZ2wAjd3w4jmvws6UzplGxvvEy6oM3cmypdo42Sk5yBaiKfMkXSC6DsjuQ7q1a6caGFu64mmBm88BIEYVAGi0gxK0umhgmG+NXLgv4fE0Ot4k5yST8YIK68ZPFOdogXuMKZs11SGFeKh6VV4sgcgCcJPA2SnGoKnnEtXdypOGg9eIiatC58E0ClGWQ3eFuRdARwoQwAJLKBUes8JsBNMNlNYEMZDQgmwA3QMS3thOn2ac6ePmW+uD0Vt9vBwxCh1HMdDQqAuL2J0thHYART0achUPSchZni7U+RTJXUKlS2SikumDl5lQqmJCdaVfqpKZkymWD9bcAP+pjcumFRRoZgnsiL7jgYPOPjidLiRf/7z4KQ4ZuW8mptuSlwep+OKWWS4PcNaOhcmDoHasPt+Y5yl4JD9C5pK3FZn2mT+jkCo9CxMxZA0vBQsU5jtGE+oyfbpcbybVvTJLtiFcYZeEH9NVQpWQ93GRl5simvXFo7RuTy9Snpc9ZI994KmRoBk0L7ma2SV66mBMkrtuSlkgF8KJCBQ4eMDAN+cHv04yBYZwzPCVRQ0uo6QAyyNcjoUOkvHIJsYwbrA0AUfxW2QT8CgiZsZ0LVBi2WDL1C6B1C43QXiHsEQYwUGI3TtQ9ggRAIGe60jnOdeBcjBRJytWvzpo24af7cjAeKS++9Kv/hcqrs2lGrFQs1q2ukoKBAXzTuhjfIvcIAmY3loIeBjR0bN08DRz5s28MACAuVj1rqwzQcO8MkXhaj/+G2YR93MkEavUY0bKn3Otb+XEeARRJvvfW2mgLz5WLXfbuE7A9WScciNgKxEVjYCPClkP8+PtoxnXgiGLIVMg7ZYGCTSU12CJNP9BlYaX5hC9v7lbs2mTjPPPW0fPLRZ5pModTR4489smwsEGWOxmLeI0B5n6UKPpOTARIcTMbwn0kkW2BInmzdtVllstat36RV9QRDeP9c6LlsJBRNPwKWk8HdmvGdiXcyHxobryDB+mNUyb8Lyant8L15SIuFKOPz/vsHpKe3Wx7a/7zs3vMNTUwzuK6RjbKp9My6vUgzmfBuboIaQtsNTbqvW79NAZmwz46BBg0TJAtS3fZlySb54IOXp5O5TBATJKFcz9iYX6W2Xnrpr5AkPqySWyXF1bJ58/0zwIguKDgwuUwPkkce+TbekVfpu9ilSyd13aMf/VaTzGQQ0B+LCWMCLZTLu3HjHJLDZCNYOQ52l6baTU1XpenWBe39urWbp0Gl2cZnqY/hUNs2xzDvSQcPfRAA9PKU3UQwZBM/F/F+xN/OAFDWWN2Z7A3uJ49BMkX27fu6Hjf0DiEz6fHHvq3AHFkQ77+Pgh0k+bdt268MEQJmDPs5s3jH8KR0dtxCXxp1GwQbikuqIwJc3HcCFwa8MPtJYPHoR69Mgx/PA2gjg6WwoESP4cbGa+r7c+TwAXnvPco/Vcpe+IzYZbZ4DJONsmvPk/Lc8z8QAkw8hnl+Exw8+O6LKjtGplPtzkfxnLFJmSFqzl53Uq8R9Gozlzl+Njddhw/gF9pNnqdkz/BaGOJSqDmbEXjpeQNF1lwnxTEk968vRk7ydm6P3pwMetGMR3MB06WtYF7SyVymLexJfILSBhB5Fbmiyqoa2bJ9o+bl1q5Zi3+rlRlCH8OlzrPMxbCcu0PWhN38fMZO4ks0/h929ocBA+YDcgRve8Z4L9DgnW0RtJjNyyPU9oOZLOa6FWrZ2LSVOwIRAZCpuERJw8WbLBANJIc16UyZIMSlnuuSO5qnf1P0pwRZYxcq7qdGylQSywdfBwIjGZCYGuxtBjAyDDAERpuouNekMyI9FVX0kLIi+EFTMbJAHDDlHgECySD44QdLoyCuX7xIXLdl1UlHa6LkZGXgwB0Tf8OEtKcOoDoArIRxPixZumsEDvyQjTJsDYIcKalgCGS4FBjgRTLBkyk5mEZPj8RMq9IkDjJLXJaV/wQyuK9qsI4oR9Ld78yEv0bfNOCRmV0iJanV+nDW2vaFJNAfBAAGwR/6fljsD11dJbaIMaQ2ecWfCXCHlSyDfVhnWPKcSWByWAl5bpv9ZyQHvEW8AAOSXTAK8oAODFmwFIBNpKtTszcJ+zLQZiHuTM4z4kBvHAJThjJfly9DmgzG8pm5oPAimZ+CK1J23YCMea5Kdba1nU65piyK4SzIZKEJB3CGFPiUMJjMj8efhDUu0OQEQW7IWGGWdLdavxMxd95a2lpP6LRcPNxx+SkwTRo76qSx1WLvtJ05gbKfQPYXY1EYly0XWj4Q6/EMWEe8R1omMYYW+US31VY6JpUXAYjgWDHyWwQopgBieGB4n5ZitUcGDMENk1umbBdBIVKIc8E+YlD5Jg1ABtfr
8Y4rEEJwhI8oHHdsWY/xBBy/oyDsKNsDf6+BTilDGSH45DjHeZwq/0XwY3gQXiv43gk2TC7aZxjGlH65y+FOBzV2e6k8uK1EX36GYKD4X+rfll/84rAcPNghr758YMZNmlqWmzZtVHO65QRD7iUGSNifHKdCVlZAAgsLzYcBwkqdYB1hu3zUYoAIYfu/wBnh2DGmz4tRgRqOATItPYZ9oASZI9GI1y1wp2KrL+oIUPrqiy9OyEsv/kKTgAV4Afud539HvT+WQx96UXcm1lhsBFb4CKgMAV4GPz76sf7jSx7POSaf1q7foOyQLVu3LloCdYUPx13tHj1ZanftvCsskJgHyPx/et6z6Fc112TaXLc4DqYBC/jChR0MaTpQLwffPKjyTnYwZBO16quqliUBZ+8nQQMm72/cvKyMBCaLaQh6KHQAACAASURBVNLMKvlPPn5LE6eVqDh/5BFKX1WETOSGSnqGG4tw000CmjJAKlGFZGZRUXWgEh7P5HdiTHc0ZQc/+LzZAPbHqZPHtBJ/34PPipGk4raczjRU2++CH8J3VBaLyd6zZ49pMt2ehOdGKHW1d+8zKq2UCO9ABoGN2tqHFDxh4p3ADSWA6D9Clkl7+zty9tznAI2+rpJhJkZGBuBtclIBEhp8kyViQCUuE+o5eT4J4ekNRvkHj+HeXnqUhnYJnnEMgx1CQGSl3I8oIcVxrod3KRkJx46+hQLcDQpWUTbq3JkPdayffPI78IzZHPIYjnKYolqsr69LC4cpf0XJLSeS6vMJHqetAFIo3abH8EPPKfhRWbFGveJ4DNMnhJ40ZCTRb4QeKgawMNvkcwQ9ankOE9w04AgBu5079itASKZTZ6dVsEvAhtJwbI9sLLKy7OCiJSt3XRk1nqxs9S+hf1Coc5S5vffefQ/g2WHN8QWHXbaemh1//3HwEtF9ZwE3x2i2MAl/giFnznyh/8h4MWDIhrVr8Xy1SRmfE3MEYGbbbvA840URPD3cdzI3miY/u4M1YZZXr44IQW8MGp1PMyIiLD/f2WSY5MVvDCuDxX2ZLfgbBXt5zLY82SRLvU+zbT82b/FGICIAEgf6kyvNIZeBkJqLybQEEH0QkATul1vaI/pB9NAzAZlsJqGnfSJYEQ9JrK5RS69+GGyPlARkr9NuJ54IfpAJwos4b/4FrnIBy1OD5rWBP7XNBgApyRVWlX3GECRgoMnU0DeoN1PG4OAIGBNuNTUvKrF0F6cmATgAfHEkw9wbEkc3bjZP+3cYQ/O4jflYt0HK+92S5YaXCPwh/ABIylEJZ7FF2mQoHswMbENNz5EI72ttxa5dk9WrIH+VugnMFcgtOSeloblNigKgiSMlXZoBilCmyUT2hFOZLdRgViktmK3391mATIJvAMyUOOlOzxFXQSFYBz6QV/plIr0QbSfp9k2YlxU+eGdnOfHAVC3Gyr2tuVHbvNV8S5FnSjsRwGICv7PfK+6aUvVyIduFII5nqEt68JuswgNXD4yCRuGhYdguXoIkeCAchz5rWny+yk4xcvvw6SzQ8bjp65UbOBo2JVXIqu2PSlxRjnQm90ndMNCUDqAZBD0AgsTtLtF1KXn1rZFUeR/VAq1yU4r6DTLfrsQUY3JOkKXgVpLlTQKJKnNsEaxhvQZBjMR4y2tk1OZRYnw7CsFQslNZ3E6PSp+1Nsdh3W5lbxiiC2WwCHjoGAbaYl9dANoog2XaJIhEj5HEBDBCAkAIwQ81S6fJOMaHf/N4NfJfutN3Mfg8z8qx8Qn+NaXHnz14k75w/rT+ewP7x0rQVTWrp+mbBgwhvXQpE5D3EgMk7M+NU0H9PxgWLhh20XAz+PI1mwfIYoAI4ba90OnseyjJhGAGyEI8TMJV8dmlxzj28WnWfWOh+xRbf/FGgNWHN65dlZ/+7G/VDJjmiN/85tfkuW9+8570/li8kYu19GUeAT7H8XlwHNWLSxGJkExhDIPRHCrsFXr8m9Xk/GeqyUOBIUzWL2eBRKh+34vTyMK1s0B++/qrKEjZhMTo3iWtGI15gMz/aGpHEdqP/9tfadHZUgeL2qJNbBmJIQOG0CR5zaYaWY0EOiWGdmzfrhJry1GNzOcyJkKfeur3tWqdyVBKXVH14cMPf61sBppRW1XjoSVWDHix0DGeYPILXh7U8mdiNT3NKoIMlViNtC3+Fk3NVpKWCfJN8OekATXDtEeZFLIFDGBBEIhG2XYAxOFIAsgMTyYkhQl+mHUpvZSR4ZmuOjfsb7JL1q3bpsARWR5kAxQWlE4n3SmndenSKQUaKGOUm3d7Hvtmf05OTnGoNPlPf/oTlUhc6mBuglXy0UQ096M1qKpfjmOYvwnH+BF4bVAKi2N/9NjrUtm8QWgoTkCNPh8EBoKL1KLZ17ksQxYTjwVuk+HJttQb5tKGWXZ8fEKBNQJ0BFPIFqI8Fs83cxzyeOGxSaYJAYvr1y8qYGFnkvA4p09ISclqlQYy65Ll5QLQyXOc/eW5x/7zPFm9Zitk8SCRHwD3eI0wY0f5q6tXTivgsHbdLj2HZhtXgh9/8Rd/oUUcocL+nMP54ZYLte70WAX8HWZbJtQ8OxiSjbFlnuXR/Q/K0888H2rxZZ8Wirkx106wjb6phpD+IXNta7bl+Tv6k3tkYLwprAxWNEbtlLBif9OR26TMVbhQk/MogPFw68emr6wRCH11COrjqM+CH8hQIC2eptHqiYFSeVbLs/Ld4RuUW6yAL3er7E+hu1Bf5IiSMsGehYRwGiSakuGh0NXZpJbZ5Zlp2hZIAJLjgYn6SK+yP/LySyRbNfEt43N/R5u0dV6RfH+/yl9d7Y6TgVuXVIKIng30ZmAF/vXGi/LCQ19X4MPEGNgbLoAL4+PkJ0D6aGoMaHS89mdN+XpIVGXIrZ426LrlSSc8MwQACI3dqayfCXogg+yMjPQ0yaip0P7SMJ3tDKDSJx43gmHcMAYaz8t4d4NKWXF5ymGNerssA3S0kRvvEMfUbTNilz5owbTdC6ksJMwnmHBHTIBxkpHiVmYKzdfJZKC5+SiwopTxQVQrW7qOY5CySh/DTSTbYoBwXT64MHFP83QGwQ8G5QwIcLA9H34LA2T54sBgyKyUFDB0+roDDB94egyPpAMEsNgkRtZLmREABybAtJgAdkVmAxkYnZAMyyuulLKqfBm+0S43hk9L53rIVYEh0554RaZuAvCAx8e0z8c6S/JqfUa57IDR+aGTB+S0f/A2+JGF/ettnwY/dsDgvhG/XQtksHbDQI6MDhPG5JzHgAEmeGPu6mlR4KEJ4x/Xk4LfzaP9NcbsBJhGhmlIlyKbM4sBnlkMj47mZinHQ3AHGCEJYIckQ0NTmSBYnqHbC0hnkTFiDNDHKamGoSajZCIA/HE9A9TYDdanO38X/hiA3uWJy+1y/DLPOCuO9KzVaivq32oFaOCm3o2HZspisBLU3KRLi0tl165dME+jVFaNZLhcyq5abDDkXmKAzOYBMv0j8CS/fVjP6ciI5AEyp8YiLNwLjWJchSIsFd1si71yZ1vBDJCFeJiEqmzT3nG8TeDvyTFei62XB9u
c2J93aQSYBCb48Tf/7cfyxutv6gvyY48+DL3g34dJZtWSJv/u0i7HNhsbASQCppBIuSivvvqqtLZYHnpLNSx8Lj954mzUzU8nUIMqcasgjUW/uZ21O1H9Co8KSOss9vNA1J28xxYksHTf/XuQDH5Ifv7zFgWCyYYrxzNxKZLVSymbsZKLJ1byz9zb2yuv/+agVvouVQTLb8x1OzyX665d0n+ujCNyCNIsDz+8V7773e/Kli1b7tCnn2v70SzP5CUBjv37n4YP0U/0H9/9WRVO5gSr64NZEfZ25wNQBPdrMdqwt0kZb5qyGzAlN7dYZb3sweQxmS7GE6Qflfv0TbA/qzLx7ILqBqv47X1kWxw3kzw27dKLhTJirLjn+F25clzZIomJach7TMCP4aImtA0oQ8Bk5ljOzOgRfP/g0CF5G1X0SxWLcQzbwfk1kHZ74P6d8jwYwvc/cP+yeCURnOI4P/rYt+TVA+1y+P1fy+dplofLlm0PKcvJSF8t1Tiadkdp7rpI0dPTOs2IorwVC5ODzxUem9k4jnisDg62I38woP43JgjiETihP05wIRr9U3iuU+bOBI9tMmjoNUKPECODRVCF5wblr+gbpKAMwD62PVuweJnsMZ4rSxUEcCj1xevpXCMFOT2OEVlCLDKl1CiLSJYyaGhuZVVn38pcfDvCtbRc7A9un8AzvUpy4leHBS+y4iuRkzw7nd8K1W8ySUridoWaNT1NwRErVTvrcrGZX44RiAoAGRv3BRL1PnhUWEl8JnXJDiDrw5XK5JFVQ8/q+uzqDdC2I4SApDPkgBLap5R9Qd+OifQEaP3BPAc6g/FjLmVWMIFtktlpGQ6tBqEx7RBABIIf584dggGSV0bciXINUldqOs0EPBIjzGVlQGhpENX3W4pr1CvjxnVU9AbOdCceDuAmpv4lvOCMSZL0JEIKyw3UHFJL/T0QMPL1i2vjmmkL4ky3V9KTboMoCVl54kOiNwleJgSA6IkioM1moFrClYI+jWBZgCw0MU9GMpym5gxzO6BxOWPKlSp9HQGgAd/7h1DtBzkti8sCsAfsEoJMHfACYWKZoWbllWB1JMerlFbcIDPtaVq9P+T0Ih1poZUjo0BCMcYG/NCVEWrMDlBlJNAe2/WCdVLpyZJLo10ACvqkxgMDdpjXk8nCTL5/0pInI/gxiLEhIEOgpgqSY5cCrJbk3DyphK8LZcNG8idVEqrBjXFHMQclxChZxSDwQaYHPT7oe5IJkKkoLVku3rwlh44cUGNzmw3JNPjhBgiT4YuHBFcCZMsmpAjMGmWwBAAQBRfQfrEjVfeRQfCIjwEq0YYgAwOQjbScvC4uMGN0OcwjiMNIZXuITABF5TCTt08jAMVjnOAHGSYEN/hP8N0AHQRUuBw/OU5sl3Ja1nJjOm4ETEx/RmH61dbWptu5G0GZubeP98inFy1wi30Y9o1BEi1Xzw1faCYyQD8LDOFD67Gjn+mDNplGwWDIYu3TvcQAsZvqDSqQ7NBzmNc3+6cX5+98wu4BMhSGSj4xkAAglufE/B8GCZDVHWvG+Wq1QXZWbmq6ApXOU30AWyEjh0qfaIMPxYYBYmmyOiDXhwPQn3CHaXv3OM/vuT9kmgdvts/xpZRiqLEfTIoP6MLOf3yi3e/YcrOPAK+RFy9ekp/8vz+Wl/6/V/S42rBxq3z/j/4IwOuOWHJ19uGLzbWNQHCVcPBL/EocrMaGRmhkv6YJ1IUmiZZq/+yVuCnJx6ADfkSryb/26JPy9NPfWPLk/FLt10psNx8McAK/p8+dlc8BgLz++tvQwN8kf/iHf7CoptbB+x7zAAkekei+p6IwkM/GyxFMdjMJPNfgdYWMyvz8Ihh+b9bneMrdFhcXSyjz4Lm2H+3yNGGmxwfNmz868qq+o2/ctFur6lldP1sEX9tnWzbcPNMGAQQGE5qUw2LYK97DrR9q+gA8QMi0UBlvRwoAEIpAzwwal1O+h8Hk8RhkqunTYY/kZMcdCdFwBt30lWClPdkdTU0HdDz3dneqDBblr+i3wmQzjaOZZKZPhT2CE9ScR8nDVvi7LnXwGKbxeXBFfqTt2o9h3nv27blfpYToX7lc5u58nuBY7t79hIJMv/3t34MN0gAwar2a25PBsFx9ScaxpsAYQDiyKuYbfCcbAphhQDwnrhPBIB7bZrI+JdmSWaO8+CgKToPDAQA/2uA4FUK6i8wnAiB2GSwaxdPbhvJXKuEGsM8u4RZqG7/7e99Tr0B70HTdj3NtwHvbIN3lZF4Qxclz6Ktp88qVC/Jn/+7/ifoaTNCDPkNkmtEYnZ6rZHSWFBWqF0hbiyUHFmp/lmtatOyPSKbqBFvIqlgOI3BeO3qTL0vXeKXky6Y7hkpBi9u153fM5wS9/gDYIPiD+viwQErIlWMTv7QjEBEA4QNROm7WTNQLNQadlLHKMD7ZyB5D8slhVduXgN1RWFgIsyGnDAD4YLggsZSQBx8NRCoAgEQkkXuHxwGEZOITyxSMSVorDKlZXY/IyqyRHbs3aaJqFMALmR+shnMBBLiGi+wlACAMAi/G9JrVbHkAY8o21ErnJFgoYFdQdioTSWkagxsPDa6XUp4jg3hI6Y0DajgFMIJoHm4c6SNjACc61OR7ZBh+IqkO6YbnCM8bb+YIIJYRmXAGoD/kMYfjksQfj7HBRa2wqFjBBzI/CHr04iKLJyBlkNj9P25BDuoWKo09nX3S0z6poAaDY0vEmreLPgBCecjDTSBZPzhkJf3GYabuLKvSZZk4TXXBKwUMEP7TgBeIf+x2ApWJfYIxBEOcaNs70qYAxnh6kXqdMJjc74SDPAm6LU3XlCHCUGYPXC2C5WXrLl6VjFWgl9ZUStVagEXYl4vQvwSaIq03uy2GB59ZA9fwJwse030l6PFMYp/kpKOapdcnn5+7IL8EUyfuEtZFlHb45GT67b4zqfpYwX06r+PmFQUWsMPqCZKNz93bqV+aigeNW8pIySsuULo5gQcyMvqvXxdWJW4s2qjHzfjwdbCNHBYw4cPhDpCCgAXZMQScCGi5Mwf1O8fr6vVrUlKQL20YHwb/zsRyDMPkIKBB8E+PWCzX47eYOgqIkAWCbVD2qq+lQVkkZM8wmNj7t//m/1Dg7m6E35cMebkyGcZvYw+fD54vePmIFCbxwQdWymTZwZB9+x5UA/U+SKvhB4vU1Kzz70UGSFMXrisAm8JFzxA0iJMjXo5nrG6xKKyHzDOtFksvVPtM/vv841KSa6c+hFpy5jS+qJlrJ6tcmIBJPXNbW7gxsDgr9HlsEAAxD8WRko2GAeIEgNzb4pMztk2zv/ax8GLb7D8fVOcS3AbXYXtnr3dKU7vlXRSqjV7cb+Y6PqHaiU2b2wiw6t3n80K+shcvboPQxL4gL//6V/LWm28r84Pgx5/8qx/JE088uSxVfXPrfWzp5RoBUyEbKlkTqg8EhilvQtNOBl/YmeharoREqD5FM43XK0p2dhdX6OKm2nexP9n2fKsXuS4TUG48+/Nlngmoh/c+BCbITvhawSgdxqSxWJwRSEiIV+D3n/+zfw4fxl
71QvrFi/9D8vPy5evfeGpJrokxD5D5/3bF8G38s3/3f4WVmJt/yzPX5HXt73/2d/K3f/sPUVUghzpfCXpUVVagaDBvScG0cPvMaznlqF0Z1vsRK3lZbU7gIFJEer6MtD7nsw27rBSTvmRw8L6B1HY0TShQYtqyr0BJ70jBava5RKhENNdnH8jqWAOvCcqJUQaLpuf5eYVil79Sg3ckmYPvoXb2CdU+0tMz5F/+6Efy/e//k7l0b17LHnjlJfnP//lvhLJAkSIY9GAuYPu27fBQgWR6YZH2e7nvPRxLsiHI2GEYSbpM+NyE+70i7edc5xtZKYJu7e0t8NVoQ50nZOZTbr+rzdZmuGN4tnU4jyDdYgZBwTVrtqupvF0Gqwtg3pWrZxWUIchnyWrdloYL7kMicpZr162fUSzF94wvrrTJG8fqg0zQs+T5h6pldalVtBzc1mzfCahECjvoQbCZeRIDegRfdxOSbgMzkdqdz3wfZO0jxWKwPwiikJExDvuE5QrDAnHFl4T1AonUFwI2A9Ik+RN3giiR1o3N/3KOQNQZNyabBUn5hL4kSUPC2OGdkDa8WzIZTKkhVtsz6Ut5JT+AClpEEPzIiPdLYn6OjCe4JHHCAi/iknogi5UuWXlF0tfgRSIZCenA+FVVrhUnaHe4SqmBuBNV9O4HdstICczPKbd0AdOrrAqbzIBBd3ZfvzIRymrWyLnOOkj6uJHCn5AhejgAmOhk3hFJ7ML0VAUo+h1IeuOT/Zgaw0scKox7AIpcTPZLwTBAibYk6a32a/8FuEN33XkZc1sPZalpLplMouF6ngwhWZ824sBDHLoLHcNe3gz7gPJCHmsqoCXK3SIDZAS+IMacndPSlauQqkbnlK5iBb5Ox3MTL+A0cuLfSUj+TaVYT1kEdeLjgayiojspcLEkCBI/gWT+sJVczYAEVBaAgGEAIExqq+9JQBIrcQjoBMaBif4+MGrGirKkKzdOUjuRtIYEFhkfE/jXB5E74yPCPpHlkIHffv2+LXLK2yMHEo8pslu6tkYBo63D8H6B9AGjsdD6jV9AVXUJHgIcp3vlQt81Od7SLo0tPdIA8KfLBnhIXrps7nYoaEBQIR0MFR5r7IsyKBC9IUT3CBgBd5NR3OyNRJX5JMuF4McNgCEMUgzZHveL0lgJ8PMgq8ON71y2h8cx/uaYEPBqamvXbXP5/hGyPGAZD3YJ68PJImE/C3Dvp3QWt+kByGGkwrgelyFYwn3iCUbQkL8Xq12OnzijjIq7FdYN+a3bxnPw0PE7eqLWEma/TbWOoSDTXI9arhxnRnFB7YJ2715igLACbHtBOUAwXL2gY+f1jUs6rjX2mMALaDYumOW4LoUzdgs1oHz45ovPk9Wj0jKF62igbV96r6QMWQ90Q6TS5yYCUEiQmoySkBVpodrmyyFfRvlStWXLTj12+QBalZMlJXnZ8pMur/wwxylN8Pa50QWAFsuSGVRWtiZidQ63x75zbNZBgs65DgwV29h4xyEh6LAe3k3/i+LgqQOd2OCXt1B9N9O4LA3nHquqkgYw3exjb8ZI2wcIU5KfruMTi+UbAb6UtLa2qGHh2++8rdeQxpstmphl7Lpvl/zxv/iRfOMbT9+VBM3yjcTK3BIlDVh1yhfsuwUcEJxvBeDeB/m9NLwgFwLAn00ahcBHe0er1F09hWTATU0GsLrPDU32isqN+vLMxNBs+tF389fYtn2b/Pn//e9RGWz55S1VX5pxrv3Dz342J5kTexKVL/Pb0dc9qLylJBOBj7uRgFqq8Vkp7fLJnwU/jz/xuJy7UIek90+0EOE/yX/ULj7x5OOLfm2MeYDM/9enbNnqNWvn30CUa/Le+e677866dCjQg/K1K+V85bX69KkPURh2TKVtCnCNrocx+BdfHNJEvd1TIHhHTdI2ePpcv/PeRqmqgsJKOX/uMzVmZ8LVbiIerk32n94ETIAHV6SPjVnXbzI7wt07yRSJBigx2w/HAOGzOsH9tWtrdT9YKV9/87xKjBn5K7J9jHF08P4EP1MTRKhatSp4sUX/TuGtL774VH/7cBF8DBP02LN7zzRwd7clF3kMXLh4XD75xDoXadLOd6WjR3+jXivFReVzemcJNw6zTed7FHMQGRn5yPlcnPbksJuIh1uf/R8E44NBuS4CgsHABo+7UMfwqDHpDdf4HKezgK5m9Tb1xzl16vC0uTo9ba5fO6HvmHwf5TtvJAB0AvmzxITbxxW9TzORM9pQ7UGx8G22VQYUV9Lxb67B6y/ZJHwnDg5zzJKtQjagAT3u5nVXczZIbRKcmM3jwh+H69ZMRbzg3dPvNEEP561BECUSQyRkowuc2Ou4LFlggTjiU2fdx3CbUSZJ3OxSWjEPkHCj9+WcHhEA4YWkuetqgBmA5DyS2R14UJnyQ8MP8hQ0gGZ4tq6HP4XFM4pHpb87CenlAABNxkdW2oCMA5VOhNQSwQ9+8vvQwO1qD7I6dj+yXzKcDvETAMFNnYyOj6euARFul3xfPs88DUe2C7L5AQZEOQzLM1zystShZF/kCqASjRz+jywLqyKdKWyyEhilUoj/F0J+KR2e7UOS2uRFGyCkbCgXJySweA1oBejCSEVikgM13DcgHZB7GmqPBwiUKm5cAFvIImHgTSVrCllxSGs5YKTTBFAjDWAQQQsL/ICISx76gdxO7ijaw7WZ4M4wkqEEPwh6jGVZjBDAp9MgyBjpqvgNGGST+NrSZTLPEtdiUp1SWPSyHpsAqIRKmqFBeG+00ezdGigCBAwj90QTdGU+IKkPKEji2vrxO0LGBlJSzkJ4bBDoAqjF5D/ZPAnoK1kSmflxcg3jcya1VdublrUKeHmkZmTJqox++dmFRjmdMCldn34kVwEaGNCD61zrm5I4SJXlYpeSm+LB3ihWwCN7k2UyT1BDAz4ryL7qnwQeaHYOREa6AaB9drJFDckJNBh5rqSeJNlTu0r9VzLqgPBD4ov9Z1COasPatTougnnJqFrkcZtXnK/HNI3NTawqrwJgZCWOCYwk4Pfj9skwYhD4YBDsSAW7pxx/9/cBIEE7duCI85vBCqFMFwHC4b4s/Z2YIH7ssYeVjcJQCTf0ebk+hwDWpaf6IS1nfXK7VlVh25wpyARScnLyoMlfpB4z1AEn8+ajo59jX2MMEP2BA5GKa13JeLb0j14AxRIsBBAxeF0w7DAHxPecqTnihu/RXKuG+MBKTeEkf5P0+25p2xl4NBGXxTLKwKOIO6lAMtKKtQp6LsEH0n0PPie7+33SMtwuv/f5e5IFw7tePGAwCIJk3awHWLxeXtr1uEzWluPlf2/Ih+Vw2yXgUx3vlcHJZvQf1yr2X4kqtv6nVuAabFVYhWsn3HS+GFLuoTyxd3p8rLG3xih4fFgNF/xCGK7t2PSFjQCZHwQ//vzP/71KB9i1dAm6Efx49tnnlqTKeWE9v/fXJvjRCHPP9vYGSKWUS1npqjmd1wsdIQNkfPbpm2oqysQYzS//2Q/+LRLtKM8IkQEjWHL6zDF5661fyJlTR+54OSVbYdOWB+XZb/5AVuNFeqWBI
Ew80V+OBSxLXcl69cplee211yP+TPaX+dKqIpUa2b5th0qNxECPiMO3aAsUosL5D773bfW3e/XlA9MgyBBYcwRHKJW1mN4rMQ+QRfvplqQhJvRY3BYcX5bzlfeXm/WX5f33D4Ct0CgP7X9ePQXoo0AjdBbS3HffE2Gr2CMlQIPHJdx3+jgwUW0Mna9e/lzB80ggOft//jyMr5nozi2Qvfte0MQs/2ZCn7JAg0O9WkAQnDz24rmHzAwGi4xYVBlKKiu4z7O9GxgJoZqazWCJXVAZrE2brqmXAuWvtm3bD+PqqpD3PDsDJHibS/l9CknkcEHfSd6vq6tKpHbXDgDt+8AG3qD3nLsNepg+c9xYBPj++68o6LTnvqekqKhUPv/sbWUxFAHEe+KJ785asBFu/+cync8KOTlFKh919sxRfVY6d/aYPrPNxgIx5+AHH7yszH16llC2Ky2dRWGQFockXGdnizKiHDim7eccGSaUe2OQeZKRTuZnIH81l87blrUK+vLU44MACM3VW1vqVf6K1whKuBHkC5Zwi2ZzvDdWFaFIOh8etja5uQT02ZG4sH5z+8Ggh5EHr66uwu8PxZwI7KTwZ0I0exfdMt4p5LECjLXZ1mBBLCqCwwZzP86xrLAgA0EUP4rLZ2sjbOPznEHwwgAYrjiwQKJk8AVvjsANAZxw68c8QIJH7Mv9PSIAcxn90gAAIABJREFUwt1zgcEBToZ0tCLZn1ku7voGaZ+cedGIg6E5dZPiEyAJVZ4kpR63OD2QZhrvlkwkygL5LCTp00DuSJRb7W3qvZE5mUBMQJzeYancVIuEarl6fyDDLSlAK4tHPFL+ZrNcf/ewJCOBXLS9WmjeTXJViztJiuDjwbCrVU4VuCUfCcX2EQuc0QUQ1uUaAGeuR7oD006bmfysR5IdHwRXNEwRHogI9LCQEmt6T641W7Py+EeZJ8bMx9FshVrG8zjElgwDJbYY2RNO3GCgO4gkKJkeBD+mY8QFma5E8TV0adJeZbWQwGuFDFahO0tN1ZNtPiBcLyuhRxqboV/KJD+/Q0bBn1Wk/iEuUozhSUJZrkwAB0yPmvR0UZxHWvLHZQ/QGFc8fEXcYJ3kVEkOAKw0MFYo4RUHTVSu21GQJ0cnLmj7lLdaX1kq6wfHpShuQJLbU6Tj8DE5H98iZ8C6iMvKlyMtHdI3iMRm/5R0ZDml9v618ijYBjSRJ7jgrwY4hE9Kc9EHhi/+3d04VuIn1AeF+5wMSbWu5CaZ6Cfo4peyRIA9AfCD/aA8lcp9gZFk/FfIViFoQTYSWSx5NRVKs75x8rQCJolgZxCYIPhBYMcua2WAEwI/PI5ZmOJHe5QJ47IM30SnNN7A7aq5U5k0/EdhHbI+MnncItSHBT8FtzcAmbRkn3V8bNq8Wf76r/8aeJYFaCUCwOLfS/2pnUKM46G9eSBeegfa9buve0reeO0V8b7x86h0WHmTZ4WLHfSg+SnHKx3altTvP3funAQw0cBW5/5xLzBA+HBM7dLR0W5U8yK5P0hzxH4FPQ34YT77+5rFh2W8GV2S6SqBhGCBvhjNlqBjgpCmi/0j9fAxqoeRd7+ARKHtm2D7bLe3p1+3784AEJJRjAfIbH2IDJVENOsymZh99hPJZgX1zc+k6NBrOotgh7eiRpz1dfrJICjyv+BfS8ezYIS5ZGD7HvHmlM7af7ZvHxv2n5GC/hEYMmPD6b3tZ6Q/qV5GPBW4LWQjIZ4/a9tsJ9z4uME4mQaeFjA+2tlYLMoI8AUqnJEgDTljsfwjQO3wzz59A/4Ov5Tnnv+RGkoGJ3KWole8bg7BH45VlUwsnDvzoco6UA6NBQRGLz24LzzfL106KS/+8r9oEoCGr3xhzsu3WF2NDVc1MXTw3Rdx3fHJH/3R/ykV5atnvQYuxf5F0+ZSgx+R+kDWHCuGeV6ygnHjxo1adbtu/Tp9mV8pCahI+3EvzecxwefHP/mX/0p3643X31QQhAUsx49/hgTsPlTwZqocTHlFJe7v4auqoxmXmAdINKO0cpZhwthUHC/n+RoCh75jUMwy9gRqd0+7MPF6+dLnep1+6mvfw/tmHt4BO+XI4QPyzjsvSl5eCa4/22fcdyJtj8+0obZ3R6cCE9gnAhc7d+yfvtfYt02AxN5vtk1j8es3zuP+9GuwcH4JxQmaN2/UdmgazcQ9QYdmSEtv3fLAjGdV3t/6+jpROGflJkqRpOb76WzP4qbv4RggnM9+sdBq06Y98sXxd1EE2SiXL1teCkxmM6lM9iSXs++PtW4UWdFwA7gI08mEYfC+U1lVIwTat21YPwP04PWM/5goXuzeRjqmrDGydtQ+dnxO4TMSZcfIvHnqqd9XhinjLbxTv/feyzC33oT7564Zx0Ck7RFImOsxzEKy2l2PyenTH8iVyxe1aKS4ZJXs2P7QHeAF21b2ddstPQfZVz5bFRZWCAGQ3Nwi/U7QoaXlusqIOp235bS4fn9/jzQ3N6iSRG5uFXIeuYtStKGqAzjneS2jDNa5cx8pmMcgwBNKws36ZWb/P/d3GEXV7b0zGRsERvI9UG1JmTsLhHKl7Ke57hpGLEEPAnXMb0VblLDYx3So0YiU3Oc69O4wMm6h2uA0h9+DPB3Lf0NH31RDxDZCr7nwqQtlnhD8GRiHGk7A0zq4R2SA6LzlQKyCNx77vugjEBEAIUrMIAhCAGQYEljDZVmSPZAlw22obh/0w5vaIVN9wxKfnCG+rBRZvapEdlWukRH4Ngx0xMukv0OKs5LlPG5gOZCh6pscFU9cqnSO4iEdLI12JL47oM6Ughv4iK8dVer5WjE/6E2HfJTF2KAGMrWR969DcjplLdoldQ9m5vnFkouLGMPrm4KeabasW10tvUPp8L7owoMGfCCQiHTA2Ybm54w4AAaUvmoZiZei1El5u/sqkOabOm/rqlqpBpBwHdTYwkELMmnN8ABMqdf5jNapbilqvz10FwAimCiMY7rcCgItBFUY7SkWg6UmDabxaQXoLDT/IY1VuKlMWsBuYWRQRzKAvXhQBSjdLYrBEIjIIfiBBCoTg/RGmRqzGBLJ3gRpBNBAYIQMEMpe9aIyqBj+IumVSPbhQTMd1WEDuCkisypFMOia8g3CAB6SZY3nZAp3s121e2SqbUilwkbj4XfiBCqePKWMlLhxwCX0NAkEwY9v5RRKxdHr0nnjhrzvuz7N8iDY0Vk8ASAlCRU8WZKPi6THmyFbAGo441CB7hiBR0YSkpgJMghggDpavB1RIso/au0PDeAJNqi0VdqwpKbnyxos44tPkOEpl9y/Y520NTeqZweDy1N8i98T4vsU0BjHOGXgpyMLJAGAy7DXOj7IGnGzCB4AiJ8MDyRV6M9B4MMZeKwqhxRRG1gwPX03lN1Uke2WTNBY2R96tfhHkzThz+jHepYBu08BlUYwSwiuCPui5ugEY25Dc7wZEui5W9GNSv4zJ+vkyJlWmJZZx+ytuPV4eN+gsjNG2sreP8P02LihRhkeW7dtgSb1Hvy+pQp6kFVgEjYEQBYjvsweICaB5/ODhTTQpAAEgYn4KRyv
cQD8cP7yvOd1iUFQgkn+MZzTY73XdTqT9GSEhEr2M9E3MtIOcK17GvjgugynM38GcOBE234w8Ua9uL7QTwnnRTAQYtfFN6ANgY98VFKnfviaghv2MOAHp/Nv/jPLKEiCfy2PAgjZ84yMohque/N902BLOFBIxwaRkVWtY8Mx4xWHY9OPC6Dpf287+o/9CQfksH0+vOn4BEAnM7ZsPzkRnkhonwAIp0czPpGAohmDE/sypxEw0i5ciUk8Mu0uX76uL3D0GPqr//rXQk3qZ775zF29bs5pp5ZxYR7vDJM8Md/t00x37POCuxgq+cIq1WbcZynn4MfzE4NthFo2XNuhlg3edvB3gsZffHFYXnrpL7WqkvcmdyZ8vq6cDF50+jtfyClFwoQUDTSZvH/2uR9qVa7bZQHCjY3X5O23/4cmrMgOOb9zPxJCAJuj1MkOu/F7ZIZJPhnAgy/xuWB4kumRm5s7p5f5e2RIVtxu8PmRIMj//q//VJ9b//EffyF11y4pe+7nP/+lgn4P7N0tP/zBD2X/I49EnXwJ3tGYB0jwiKy87/G46PG9cHvtViktLp0BUs41+TbfvTMSUJSCYfA7DZQZZEAMIEnKCnkTvBaTvcdr/GefvatsD77XP/rYtwDcbdSin0ceeUEIVhMYef/9X4Gdgfd7vG/yXsKiGXpFmO2NjKDALFCFzmKgHrzrmu0xOclrWjTPb1xm/YY9srP2Cb0/sPo8GbJWNLLesHG3Sq+SDU05q35s7yoYIgTnCdQQXNi2/QEkujfiXSgF96v1KuHT3v4OnmmO4Hx9ACyGjdPV8UyaszqfVfo8XwlYsJ/RxGwMED4KcD8qKteBDVal7RvmJJPzxjg6GPzgdsPdv6Pp00KWIYuJ/rIbt2xQT1eCHjt27ME75tawTI/FThQbCSgWWDD4vafHUrkgA2IQOSACBSZ4DFPujGAUn1MOH/6tziJbiZJjnLf3gW+qiTefRd7CMweBPcOgjXTOdHY2S3NLwx3bCy74mF4g8AePz9WQj9q1+2v6zMYiEAbPQR7bZDSZY5jFLdeunVf20qGDv1Km7Pr1D+hyXIYgDr02bt6ok1MnjyE/9ISCOJzH4LlGllRD/TllO23dunv6OSu4X3P9zqI/br+kdIM+p3366fsKhKjXGPxBKNM1n/CPT8IHskd+8zEK9+AJacIDb9rffWy1bKiYe16mrKxEfvSjHwA0WqXFIQu57i5HPp3Jff/4YNjkvv628AkZx7EdKRxToT2OVCLqSxwsxJtNBksZIEgnshg4VL7sS7zrX8muRwRAOCp9ADAYOVkZklQErXpvinTFD0paDyrpy62bNxkgEBtRtLkff7WP3pK+pl7x9w8A6JiQeFSoJELKaAq+Hu40mJ6jveLAkJ/Bp5OMESTdGZa3Eirne4all0yEHdukxOOXfGcZJLOQvx5NBPiBByFU8uWMxcl1/7DkpmVLcS6S/mkOVDz3SP+QVWGR7YZh+LhDX+bSkeSlnNckADyatDNxHpeUKF/LXi2nAoyRbU7LE6TYgQ3B1yJxIE4eAMLtTbce8gZhlk4T9YSUPjVyZ9BLhDF0BYbuVfnTBvDQdRGPa4t0J3jl0+M3ZaoAywEAYTCJR4mvRMhCZaVZP0NcD/w+KKE1DNksV7ykbN8sxbjZTMYDFcH4eRuxz0jeMYlnTOLjkNQbmoqTkbgJ9ROJnxyQBMc63UYuWBQtSMTzobSwohT5V1LxXDKKddwOJB0bLTCHy6ZBZ9iBJGlSYo6MVMXrPrJfQ5DjcuUlA0TKlCdLHpM/QrKy9+WD8t8bTqqfh4lbeQAwQJIh+MFg9UFFcpn+TcBlGDJblPNKq4iD9BnArIJ4nUamR7oLvjGjAGRAw6T8Qy8lopA8JfSWQH5FAZCZTph2wq+kt9/6HQqLc6VVWRhWD8j0IEhRXZgtgzhOOY8MEKLwZLO4sHwf2EZDAVkr6WuTDiSE8wDUMcjaUDN6ABR+AHJA9cQDuSx6fYCqpEyTZLA8uilrBtYK/W/SMbYeslk4DZ9lBUUKwPTRJD1gos62LWN5q5938//jeIDv6R+Va019kEybkKSEBP3MQBKIMmy+wL3LMD2CQQ8mRVj9yZevpaxS/TIyQAgU88HSDnwQmJgE6GES/GRnENggMMtpSshKqlDWA8EPBqcP4m+CAJrsD7Ae4uMz8NA9OINNYtrletxOMLjC6bzO9AYAFi4/Cs+gDgAiBEK4PBknGWllkt7bKsm36qUgDPDBtsj+mCpbD1C3jl+VBTLy4LOYXjcDKLEDIelobwhVcC1rVuPhC6wPnDuGDcP+GB5hUgCcIJDLcdOrCMaGfSTwy2UV6AkD5FAKzA58mHW0owiOD8dZ/w6MB7fF8SWwwrbt49Pbc0WXN+MTzYt0YFOxjzmMgEq7fP/78t3f/y58HnrxwvOZ/PLFX8rBgx9ohTMLLPjbvPDCt2MgiG1cmQyiRBUZEUwSMW6gKpVVp7z38mXceGWYZetvXtIX+jE8gzFZ4/GgkAEJG7I7DNuML+j00Lh44VOpq7OMJ+vrr8qJk0dUZ9ouq8DECRM6fJnmizvbZmT8/+y9aXBc55UleBO5IPcdiZ3YSHAnRWqhdlu7Zckly2WVXXa5pjx2T3VtMVtEx/SfjugfPRPRE9E/pscxS7XL3W5XjVx22ZZtWbbLtCxZ+0ZxFUECIPY99wWJRCJzzvkeHpCEEiuxUcwbQQLIfO9737vvZb737rnnHDSMNDZ2ACTfu6xcVblThEAGpRUGBrrU2488+pzcf//n5B10tJK9sVywu49dtZOTWuGChSyCH9wvjslgJzElSag3zwd7djZyW+xsLFcUWm5bn6TXKelJqTkd9Dh65KgqPtXV1YkRrOStvs5/knK5XftCEOTQocPy7LNfkMuXwGg6/bJiz/HejcXhF34yoRp07r3vXrCdda73+mZX8QBZX752aumHHnpYaGROkJKf2dJmpK2eE7/7eZ04/ZvnlW+GHjTgTqfIiRf5xS++J6+/8Uv1O43OT6IbnYViMvVO/+aHiiVBiSDKM+myhrxuPfbYF+W/fvd/VzJCvD49husA7794vWPH+sjIYkGa22PxmtcHsv888J3Ut/fww3/4se579eaSILhSX9eMRouvozEsq4CN0//8A2UmzkJwe1unul4yeC3kdZGAPAEMXqMef/xPFkAagur33f+kKg4TxPnhD/8vuefuJxSbJYexu7reVaxKFp0p+8Wi81olfZYyQDjX0uB+UAqJZucsvutF8LuwfRaVlyuic72dCF5fbj95Uv7tv/m3Owa0q8aJ0zyHryhmKIPsGTYE8pmYLI5z599Rr1fjGZ9MoXvueUJJmP3ulRfUeUfpK8pH6fdbZCoQ1GPhnuwQnj/BwNfVOU6A7tVXfrzuz4wRHrsrBY8hwQGeiyn4h/0ezWvvvfuymkPngbswhwOq9sMYGemTixffW7in4nn4GTCw+BngOARLeM7Sd4P795MXvq3u73hPx3N4aLhHXvz5dxek1U7e/ogCfjbjPoqnIplUBw8cUwDIwjl86gkF7i13Dq+
UG/29SCItZ65OSAQKKqx9MJrhQZuebl3L6tctw/rHXngOt+/t3JT7pO34BLI5kOwMJyT6y/mAELxQMlk3GBxjLSDKDW5my1YniyRlGIdHsFa/3LINVQbe8QysCQCJx2AY7TVJ+4FG8fldABiS4owUJHgoJSelU6KjbnyRFCRgcstgBF3xybC8/iH8PYwpOeU7Cg+HsLzRr7EcQgXt5kjf88gQOpnRMc/ufHaEmo3+BUNvMh1S00kxHKmVkKFD0v2TkjWSpqCtPQEQMjUKtBy+FgJf9CONh3CjYsIDfl4GB/rUQu1t9aCGOmTOHZRUX7c0OKtxgQC44YX5Eb1I4H2Rt8FPpAYACwABBkEI07gBF4y8pK1ASxAtDkgpmKBR78H4dTYxg9EQi6dkOgJKlL9NvB6nTHj6ZdoEM6qJRcZEbbtHGjJNMnk3WAU1qNYDNLAb8RNItzVgkGGYcZtQ6Vf+KB67uOag5485AkoRqx0aojmCHzRRhkQVcqSzQAiepOapo/VNDShkw8y7F4wbNGuac0nVda4DP+yiTVcV6AwgyalhsThrIO3kEWMLtLzSEQV22JWcFy7yYInACx53rNCBtMMM3gdDcHc7JMXgs5Gxy9A7F+VlgB8D8DPx74c2P/RLjeNFOQTg51evQfcSkmSU1aKBvBMm4KTlZwAIuIJad1Au55JpG9gGbrs0oADT7qgXWzoqU7lRaQztVXO46EzB6B1jIAd+dLOMT4zL2cnX1XvNe1qkGILJO8AThgvzdUIKi5FG1496jf86GiXv1dhL0/BE4XJzYJ2QUUNOEZkbIXie1ONfCt4j/cNj0lJDMCOtAA1l6Q5WR5zm51gmD8ZSNqwVePhW0N8ghbkMtC+takyCIjRT14M+Iwz6iix4myy+vSO/0ezrzgMhdC1pH3uaQg+/8xt5E8UjBgtGBD0O4AH7+LHjuDk/BNppBz770PfEnclWgh6lCbkZGSB8OCH4MTb8tirYKuADO6WDFPybYARZCHGAmAx+RgXFSBbZCWyS6aADJnqxn8X56flCPBklZIiUAgf68h8DV7A9Ag2h0AGpApBC4KAUjOG2+F1CoKUGN7qBcE9Zxod+XAh+EOxg6IwP/uRr/LeUKcLlNNmsFzTZrK88I2cbWzQQA+/pwAeX08GJagCQnDODuSFQtJibxYe9UqCCIEbIvx/npksmAFqslB+OxRwqRg62wZwrybESAEqfD/M/g1wruSwAu2bz8g+QasKV2FAG+J0CnqEyLAyhiPPU5z6HokOHKuL98Ac/UUyQb33r2wDmrfKFP/zCphv+bmjSO7wSi080av3ZT7+jiv533vkp1eRA7XRdT50FDz5wT06N4mH4J6oblA/D7GrUg923fDhmp+sdYESwoENDzNd+/yPV2cgiDxmllNRg0efw4Tvk6c+x422/6pSkhjsLUuwSJFujNNh1ymLXg596ds3+IQQyKOPBblVqwLP7lt2x59Axu1p4IcHwxBNfVrmgTjQf4hn6QzllJUqNQnNgK1J+9FaNQLBGvvbHX5Uv/EFajkJbva6hQTVxbOd1/lbN/Y3sNz8jg4MDAIrfgAzPkOpCZMMKu3NZsGM39V133XlDhSLOr+IBciNHaevX5XXz+PHjwi76nfjMKjYEnhto/qxfJ0r3miAIC7B60C8wGKxTgAYBbTIUKH1FkEIvvHJZXrNYUO26ck4Vj19/7SU8wx9WAPbwEArK8FcgQ7Q0eO5zeyyW8nrF4PY699+GbZz8mATQdSvP/8HCKuV/vvjFv4BxdavaLxZ/uS1+rnSjbmVcjibPTnSjf/rTT8upu59S1zcdRCCYcerU45hPRAEd3AcWkmlQzc55Xid53WXRmV5UvE6VAhA0RSerRAdcls7VhoZBBseotmigTOkyLILT7Jwd82SGMQ80jtavh0vH0/+e3QGpUZ7DLCBvVhF5uX1b7nXeR7Fx4sMP38Z59auFc0dfnufShfNvq38MBxQPKDVFo25KX9EvhvJnvH/iuaOHfg7QgJ4MC95LUSKNclSx6AQYRq9d99nQ11v6meH2+JnhvdlamKo8hynr+YU/xDncuEdtl+cwwbzflzmHOXfeo+neHzq4wGYYbjMF+XMCHQQiuK9kFinZZTSP8j6SwA9lv3R2i46jca4rGdtzf/XzfCFp87/wfo3n/v79dypZMQJRPId1Cbely6/nb7/bISf2oZG4hAHSFESVCZ7DGwn92WUj6y5dh9f1rQ7eK0wZuqTJcFfZTdH7Yq1Bn49yMlE7ZYCuz5vm7DcaZMpk83EAIOXDaoCHM645FQZI+fzcTK+uCQDJpElrxRf/0Wa1b0HcpOwDODaF7r82X6MkZ+cNa80OqcNzcN5LPJOnD+SDnAV4QgRgNx6Q2WJErXt1YFAIfPibNNoZu++LfqM4cPGmmXeh4FEMB73AT6PyNIreMQAgjIFZdEs7O6Sz1qY6HsXbAQBiGsW1GfF6PbiowVvEAykixCz8NmgWbkxMyfGOFomPJaSm2qdYKalsFHiGW/prMQ8U3jpQxDwI/xCCGTEXuoMh1cUw+Ar4f0zNn5FOWLAMLohu+Gp4AoAOAuL0ZsBsaVY5MXvapNPfCBNwFNDSJpkDeOGorocHybQyQIcAFCrmmkbvkXobxoWUWKJGxpJpgB8jAFEawaHJKSAJC4ttziApmA6xc5mm6ZTI0XPDgir3D3bfKu9FY14y6BJzAbBIDM9AQmRIzdlcU5BZI+Rl+vElB4aFFYCQHmMT3SgOgMKLsaIwU86GTbhhgwkWvjD9eRwbmI/vKdRK6swH0jPbJ+4Td8hBHEdKdnE7c44ZiQ5dk1A6KHO4oIyM9kkCIEcK6xJ84nHhcSgYiBCg67IqJEYYZcdtKZkaHlbsk9m8T0YSgH28M/AwaZIi5KykoJ1XfF8PdjFkwxqYRjmv2iYASRHQo/3oRh3LoxBkwk005K0sGnBldINl0uBUwFABJstksxj9uOh54b/iLoKkY5FGFFBpGu8ozkr36ISSYqGHCAsCuDtXgEgqS1ksTeJqDgU7my2j/ErcBF2AloQh4WXLxJTsVjo3reSzGKOQxHLta5s/Rgu7sSO/2KFz+eCJJrn3mNYxnIbU2/Nxt0xMhNBp+/XrQI/tMGJdLgk3IwOE9HSLCaDlfDG9FPjg7wQo6L9B6Srd74Kvs4DP1wmOUKJq6XoEL3RfEJs9qwAL4LsLweXLgSvq9fn6XinLgSvqQIhhelJaAGa3/fjnZQGM0uNDqSsyOVw/+9vrDpth4JIkP/cvFMhRDgThwur1fwCT8EtPy1UA6GSilc6DueE+qtfmvUA4f+UF4ndcx2IpXU9njdDThEF2zUr54fHhmAs5xrYIspcCUBxHf5+/k4FCTxYWUCuxtRngXYPe4fw3f/VXyofpv/zn/6RAkL/9u79VEnz33nffhqVdtnb22zs6AYhBdMSy05MMj2RyXD1U8qGUBQ87CiVkZxD8+OEP/k/Vrcf3jh29C9c1u0TCY6rI89abL6mORxZW7rzjEdwDmJQJJgEFPsSyCEAJKprEKoNXPByzcMBORna3sl
uWxZYHAIK2tnaqJOjFqx//6P9BZ3oa3ep/sdAhu1KWWAjiwy8LBfw+pf76NLzhVguuF4JudeihZxXjo1wnIgt2aYA7fIBnQcvNBg/InNyqwev7w488sm1NDbdqnjdzv1kk6evtkf/8X74t3/nO8+ozSO+Hex68W5nT+3zQ0Iccx12n7kK3svZ8sdHtVzxANpq57VtvMwtw650174fIPPzSl/5mQTJopTF4zSGQQb+L48cfgEF0BxquOhRAUdrVze9yNmIRiKA8FIv8mr9AlSo8f+lL//MC03C17R05cg/GNpW9HpRbl9e2fXuPKa+MY5CuovkyGR8JPCfNQBKSslhBfN5obk2QnQxHvetfH4/zp2TXU099XRW9uy5/oLr9uT7ZKWwioJQPmR8Efkr3vbGpQ+WThef9+0+ouZcGr4lsbPj6f/tv1MvMx9LgeMdvu1/+uz//d+q4kI2pSxstXbb0b173dyLW6pGwFXPjseKx/vyz31CNE6sFz2GeszyHeWy/8if/SvllkLVU6tfIcQNgy7JZhMeaTB02X/D48Rg/88w35IEHPrva5tR9Gj8zlIZba3DbbFDheXr06APKB2ZkpE8xVvRzmGws3qvRn4TSczr7qnQbPK8J7HB/yVoiQyYBH1qew/v2HVPgIj9fpexh3nfp+SQLqtzc+b2x9DxfKu3Gc5jz+rOv/y+qvsdzmOc6WSYbDRqdH+vww+/jKGpRrOehJoY6me4BstFxN2u97Wou5XxXMvlei4fGSgBBwjC07QbopceA87egnftGgkwZSoGREVOOKeMuarWKG9lGZd3dkYE1XfWiQK0Zly9EZS4XBYBgVADCENDZ6AH4N0zNiiVolhCYCgwCBAQHAvCpICBA0MMFcGTwWhE67w5xDY5LbGRc9t17m1zF8jF4eTQB5fWGNPROl3cyQq+O1gwjymZa5M6jNdLucUsvZLW8lpDUwVB9LNQgB5ov+ngxAAAgAElEQVQ0BkAqqnVHpFJJMeHiwbBXo/APPxGyEDLOBCSQNFZAKgtQJ/9xPcHUGLq4ky7xwKgd+kZqDOhpobiudSbH4EVhQpciwQqyNppCFhmNUI8fczVpAEmbJy5dYJLFq1JSnLFcBzZwuCTkqgTdzVZQ/ZrgOUExsKQzLY4UAQKz7HMCwEC+hmJgmwB0MQHQycNs3Aw9pgkYdZT6BLBAyCAIYsdFqzQMKP7buB+IajBLkoBgCGzEAQSYIzCrh2TFh6jr0ePkEJbJOVJiwpdHwQzvFOSngO1Oj80pmaqH7m2RIcj8BBtulyJuFOlCz/oqJcEaYfh09txZzRvDPyvnNXxCbXd88Mq89BaKmwBzquDFMgvTKR6FJFgeTnxd4WwQQxSeLsmMWOFNwfyaQU5BSVMyM/BuoSTWfIzCd8PpsENKTaMwpmPVYF7kJNc9BTkbH5gmkPgC48WIY0d5sXAE50ohKm47PEmaWhRjJ+xOYPqQHgPQYgIjJuqMif8gEL3UmLR5a2VvLAS2TQJeM2AlgalCGTSNcbJHEjF8BpCPOczcBXN3wc0qjdFTWbxC3w8wRliyoaYp/UCMyCO9RnZDFPEAnZ8ryhyKQQxTtVM+9akHcfN3BzpGGtHhAZ1Q3BTsdNyMDBDetLndzUrmSWdzMI8sprNQTxCCLIXpTNcCC4LvK78LylABBCH7gRJV160HYMQLBhZvsClxQFYEmQk68MGfpeCKkn4qOYCUnOLYZJGUymxRls55LifHITOwHHBReh7Q04NB2avS4N9kQC3HAtGX5TaOfx9/AQTpbtAAEH0fdJAhHseD4jw7g+sRDGE+9dyQxcLQ1yPYFAweW+iOCoKNSEYN5atK5cf0/HAsHWDhODrLxBO8AyALZO2WAFBcj+yScg8IaiKV2JIMaJ2Be+XP/8XXJJuJKH37C2cvyvef/wdpadkDqcc9lcLtfObZNUhmx9HjDyoj2ZpQswI/2KnIAil1yHXw46tf/R/xcHxAfZcQNCHL4nvf+19Vl+DZs79XGtLsIL3vvqdU4YkyHgx2CT7y6JcXxi01/yT48cXn/hoAyOfVQz+DmtnsoiUAQp33ttbD8hDACcqjlgMnSk8ifo/qRaHVli1dT/+93DqoCSh5Qj7IswOXHiHcdxYWyi1fbtxP2mvb+cD9ScvdTuyPDn586//+jvzj88/L2NgQirUH5U//m6+g2Po0Cm2dikG/GVHxAFlfFuOzCUnIFL7fNtZJvL6t7Y6lWWjVQef1zogAAYNjlAtenzr3HVf/GLr3FIut/LeVwTmx+Hvs2L1KPouMSBo+Mzgv/dpaWvBeOh99DDYUsDgeT0BiFQ0Lq61fmk+mZum1idfFteTAD28YXm/XEzvBAFnP/LZqWd7bn7rrMTGAtbPW4LHhsVopeKwIRPAfQz+eG/3MrLStcu/xHD58+E7c0x1T5zBl4ngOMgjg6L42y30GuRzHuA1gGkFKMo719T0eNL7i/rLcZ2Ahn8t8ttf6vcFt381jso7jUi4P+mu833FAAWOP5eMgCsGRnY7tYICsto9rBS90gGC58VYzUV9uvRt9nUxY0UpbNzQUmR3TBhRwy1+e1Ng0gs+ugzFzQxOqrLxlGVgTABKCBwWL3ZS3mrzYr9ga9bZGqQIN4kx/RBqBKJvA6Ojr04AKShcRDBiHBJLB2iJTKEDHEx8qoOPiVEICng54nzfJ+IVp1WF4orFeqlvbwJYAlRXG6ikU9Rhz6Na9ZogLFT8JVTxx9LACBp5wN6Kbbxqdiigwm2vFDDkjxpvnT8twPzr1UTCfrfaiRM1xXPjCd6AwADYFivtFG6SLgIaHwQI5VH9ChtH1f3ZoXPwAOZ44eULq41YZKcCIGD4lbhiqk91iSNdIscEu/SPoHsyHAfRoD/oeh/ZpK8I0PA4FKW/THuV7okVYxvr6xeU9CjbHYgTAhDAAqMlZp8BkCEpvwioGe888aKSxa+LpKiDVfsxhUOa6UcA/eFANQJBkOuKRqVhGgSheFFZLo9YHCS1XXL3EMcRTK0V0fpIZkk7MiMW1V8yuUamVFhT0cTM3g4sBusAZZFIAJlEAgccP1gMksshAcYYyStrMFoUxPNgfLk+DJMcBVASAgODCykgjXzQS33vffs0XZbwHGnroFsX8MgAbTLa8Al94WkTTefFGrRIzAnQgu6QmLz7ALsxaAWBZNoxzzZcXV1VOsTO8c9r+cDuGsbiEW6cwJ3KCcIM4npRRgCZkDs2BzRHOYz5OPAQCZGGJNWrJizEdkhSAINccZM5g3JGp0ubM8SgbQhaICx2vKpx1MF2H4pg/g3PdoMAPgiQpgCEOHF8Putd9uTa1aF8c28W2kmYAUzA5O4zPAs9lSxuktXBMCXoQKNF/ahvY2f+TmZy8cX4EZmD8LGhBOayWWr8cpkfPLgA/OKubkQHCeesXfhb0Gcq8G0EWh8feqjw8lKQSQmdhsAhPkMLqqbuO6cDPjpKuQszhgswumdnZnGbijcI82R3KK2MJuFLKXuDYyvTcc0CxHDR2hPbduo+A7RobGWhqnjh5twR+/vcfA0sIbMThvaN8P
rCcJnulpr1smMDgys9/h+ogA3PDuS6d/0JuACDpXh16buiFxJzoGsr8ncG8kROngyBKegwsERqjlwIs3BaPB7ddCkDx2HBe+nFk93hpp96yO1Z5Y1MzQI3dL335K9Lb06O07n/6019CHmuffPOb36xIYZVk2o77G5rHsmBDkIHBByo+oNKgsqamXumvU6pAf5/eF5SYotmk5okxqB6SA/4aBYLU1DRC3tKhpA7IuqSEBtfluFPhPiUbQR1z6oufuvuzqiCgP/M2NrQoWQVKPZChcv78WzCYfQRMEdXVsO1BFsnFS+8qSQjOmbrt7Cgs9wC/7ZOrbLCSgTVkgL54P/npTwF+/IMCNgl+/Mu/+KZ89StfQeNK3RpGWPsiFQ+Qteeqa6Jf3u69LG13fVr5GFRi9QysVHQtt/Z6ly83xnpf04F4Xg/5rzSWAhPLja01RfmUtOR611/rNpbb9npf3ykGyHrnudnLb+Tc2six2cg6m7GvvMe5kXOY+aGkVXOT1mirz2m5/dlIPjdjP1cbIw/WR+9IXD7snpDc7GKV3EVZ8IN10hDUntlXG2er3t+uhhSyN1Qxd0mQ7UDWA/3EVosFgKDMghzDhEbNnZCHUtsEaLESw6XMlMu+RCbJZoxTdvDKi7smA6sCIHloQ46DBWAoTCi/h5Z7DoopE1XGzmRRFABqhFr8EoLu5EzfNbkGmlzqWp844b2hWBYOfODQrJ/IV6OjvhreDhnxBZskY+uHlQf8OCBZRDqBEwBEMbdYkSP7I4WOXJAAVFA6ilGLzuVxGVa/o6lP7FXT0vXesDS32cSadaL7n9r24OJB3mrGCnNwMAhCoIO3w+icoEkmFJEa0Fy9CRTDAJBkYcx+9dzbYp9EYfA2+JW0GOSOrFZ47Ab4QcClZo8P61ql/ahNJrEeowYgDIN/17oLMg6whHNL2eC3Aa1EBkGUKsinaNac6iUVM5BIUvKd6FbOFXMonFrQ6QwWSaIRrIWoDE9EIZ0Uljrscm4G0k6YI6MYrVKG6CnM2wTmwYwN5ttEFTgUWC6mOD792tTnmSXIew5ADro0hwyg4Dri4pq/mUs2gMEyclTkQo+4IC/WAoCHoImS/8KxYCkx0ADAJDmqXpv87SXQOAF0gVmxF//4Ghk3+apxJUElck6qZt0AODS2UHxsVHKhGTHVar4mlLYia4bAhgFME+M4jLgBMDS5ahTowt0w5OvECQYHgQsTZLcEYMmcD1RrAGSBcbeE8Rq9RWyugGSH4sgLipcAdwxuCyTBUsrTxTxvJA/rGDHHUTQG28RlD0gUTBWXtxpKZxbxpWZltgrSXGAEqQDzg+AHvVBqIWg2lSko8IMAjBkgScQECitke4yWGqnGhYLzcxkDivljgwt7IwpG4D/huoJ9T6ELrB7eAhZ442D+RoA67tog5DxW/ahpc9nC/7Ozcwr8ePFtSMiVxEPH98jdRxrEAYms3RA3IwOE3TGxRK8CM8gaINtgAqcDGQf04WC3cSwxdp3Ek57r2dwIfj2wUITnUfBhDF0yayp+BR42WgGf3j9NYGHR70JQ0CcLYjlwRTElcLPDAj9lnJQJONahz8++0XGhfBWlrRjLsUAobaWzP7h8ubC89TNJ/OW/k2qyRE6/UG4RzQcE2zr+/Z9LJ35eOTUn/XvaNV8UghPpgYXc6OAQ568DONzHuLlPARt6bphjskYKBYDxYNfwdz0/BDvIBCEYwjwSHCkFWPRt8PhQlqzWf2gBgOI6zDHH5zgMux1AO5lvldi2DPCh4I47blcgCPXur127Kv/043/Ca3dCsu/+Cgtk/kjU1LQraQyen/qDKR9ECVo8/sTXlNeFLveUSEQBDOJBAZGGdxqNPRmUR9A7/NTfuEdZGhybIO/4eL9ih1CrXDP4rAFIqzFr9XVoxkozUAIglOqamhpB12Fw2a7fpdvajL8JyHB/CX7QIJdMFxruPvzwc0p+pBKVDJRmYGBE6/RmwxR9FXZLzMCv5rXfvybf/e7/t8D8+O//h7+WL3/5S5DACmzJNCseIKunleDHr7veEd/t7fL1P/tjSLtUvlNWz9rNtcRyhd617sWNrr/W7dzocrcqA+RG83YzrL8Z5+BmjLGTuaLyRc9QTH7w8hU0Ys+bCGNCrXVuqQ86dhwA2S4GCJkL5ZgNLPav1wC9nEQU/TF2MugBcqMSWGSS5NDAn0MNutTnRN9fjs/tJGRoJ3e1su1NyMCqVVm9cDswGYHGbEKBHTVBTeoq7/UreStGMZoRcgn2w6Q65/FJTUOthM1atzHfT7u0dSiNhSocilMsZaXE7zZAQguFfgAAdhMezg31qrt/aCQBLw90qrdoN/jephqwKwblJZio60bsHJfyWyP4QhuL1cGMHObh8JuIROAjAoNqPhyYjdPYVkTOwMCcclPKrwSgBX+y4N8fByPhzv2SnIiowj9jMLrIOmCXdKL7HJadBsOjpJNjHgjh8uMojnO8JDro5yDlZAykpbYaN8NQ9ErOTkq/zwr5r2nxT8LXAh+paXRaJuL4Em4ek4ZalzSYYQPra1ZgCxkrBE4MWId/p2J25S/Cn1nbjBz2QfIpchkQiAMACIr7WIPgxwyK2zyYlCljod5ur5KgjQ9xIUhppWQi3I9/AG6qNKQ7XsVinlb8uJqekl/FtEIo9wcrqx+oauB37ASKqC1tOJ7WNql1aTJlVTBsny4QLa6XWUzC0dQuiSqrAigYA9MDcsgHo3ljreRdBiwDs3PyPABe2PIoKgIUIJChMu23Sh4AhckDRgaWdakyZwAAjE8ySY1R0wTWEViYKshMyVsyMqe8ZrTwTTcAxxiVq9A9pYE3u7+VmT1AkxF4g2jhF5/DBNAmJ+YEmD0wmB8fn5KQB7mw4XybyuN8AUB2sB7bQNc33svBr8SZs0gBhaJpJ89dgzjDLoFKmRAvMM3h/I9CZs03ofxQklInRQApeRuplhHFiMkbP067XJj4Nv5iohEs9pVMIT2qLRA9w47wvd0SNxsDpBT8YLHd6WxEUV475xSDAeADWRw04Fbm5iXBIn8YHwKnZ0xJXREsoS9F0NOpmA16YX8o8r4q0F/3OhgQNA4nuDIe6VOm5jyKS1kULOLX1p5UwEwS63COqeMhcQHQoITVSiAI3yP7o3qw72PyV/pucAy+z+XKsUAIonAcLkegRY1525NgfpFmrQHJzJOem9L5E6CgubzLsUfLDZYjo4VUaxZjY9KrgUEYiywXLse86b4gBEQIXhCcKpW/0rehy2DN4btCn0vpMcyYxisgyA5+MVDW5cknP6MkFv/u2yNKCus3//wLMBuOV1ggOC7KzwL3W5Tm0GUW9MNFQIRSBeMTo9Azv6D0lKfCaExAQTUH2UhqU58/+6piLlISi7HSg65eE56chE8ZmCFkU0yC/UUvEQZ9RHhvw+B3IqUXaBpLfxL6b2wnk4qyKUlot7/55q/kRz/6f5X0FcGPL33pr5UsRIXRpQ5TJeYzcObyoFzojsrJx5+UE2CCb1dH5moHgIWRS5c+kv/03b9TXkgePHNR9uqLf/iFLQM/OKeK
B8jKR6YU/PjGN74hBw4e2jUM6pVnXnm3koGPZ+BWZYB8PBOVVz7JGWC9o9qy2Kxjxd+7IXbD/cZa/D9Kc7UbGRI2w/VsO85XZ7fk52syqx1vnb0SK/ajnxzS+PNR6geitoOGsErc3BlY86efIEW0ZkJ6L5slYm2A1wK6bmMJSUMuiOBF7+VhGJ6joA4GCCPuAvMCniD0sNA9QcwGmJzPG6HTH4RFfspLFVyDKJgXJAOGhsIAEJbaggQtAZlsIgVkWjqgqUgAwouyGX1FCILQ42EymJK79u9VUk9ScIIdAhNtAB2GKnTlm/ILJugkRg0pJgXkqjBeDBJGcQAAozBWithnlMQWg4bmiVZN4mom9pECSTyYpwAA0YPbjxu0gjFBFX1c7pu5vlVmU1onGZeLFVtkxLKIOCfBGnBRWutyRHqR0xbIPRlCi1JWSnKL20Pwd6iLqWgDC0Vjn2QgQ1WvgI9qmITq4YQMmLPgkiaYXHO9YnQSc7fDrNuJ3zXgg8ySXHVCPHVHIU2WkjFIfzHGzr4vv3z1LW0on7bv0JtZGFv01xZfkSLM4q8LsPANQ31SzC++/tIYdhDauMUaZFcjhiysUoe8uqry0m/NSUt2XjuXKU2PiwOSHSoofwUghzEGn5IRSGvFLV5pcFVjv7ISsQWVD4wJBu1kkPRZEtIIUCRTNQEwxa48WujfYQEANIui6HD68jxAYwHmouWYBbZZADdmiAdW2SIy2AOzO4Ad5oJ2vM0wYi/aAL5Bsmsuo50l9nktzRmYozfBO2NiclTSMJdPQnZN7BqrhGwSmsTzJ1kpuyE8Tos8dU+b3Hv0ev1Sl80ifG+3xM3EAFEGxGAvsIDPwjyN2nT6vN/bKQW3Jl9FDXpGta32+kL8vMyVnnuCIAwWL9lVTfYGQ5dj0iWfCABYrSfVewRXyCajbBMpYAtG4vNj02idyxAIIDBAgCB8LCDOs6/jQ3N1WRCEwAUNzqeDzWXlr9TGEboMVuzEA4otEp0HOvjeUvBDZ5QQkOGcaHrM3HCOzA1j6fyVUTm6MpibOUdezZ8d7rrxPNfRDdM5Ht/jMtwGg8txDDI7qvFv6fhchnklcMX8cAz9GPI1BkEkMkJ0Pxb1YiW2JQOBmhCMtL8gL7/8Grwq3pPTv3tVHn3ssxUWCLJPo3L6dZQrYCTRPED2w09+/G3F2CBoYbPZlLSVHvQHKY3VGt/5mc2BJk/wIx6LyOnf/EDegI9QueD2GDRnL8coKbfOZrxG8GNyalQZwP/iF9+R8NSUkup65g++ocCPCpNrM7L8yRgjOZ2T7v5x+cWrPXIEfmhf+9rX0OilPw3s/D5OTozDS+f78ibuz00mizz66EPyR899EZK+8/fpWzDFigfIykmtgB8r56fy7s2XgQoD5OY7ZpUZrz0D9Pk4vq8GCulW1TCsh9Nhlg402u50bBcDZDmGRM6AOsMmFPThdrrTqSy7/fWyW/RByrFcCIRYjVDDQU1iJ6S+yu5g5cUNZWBNAAjlrlzZKiELhHEXOuST6JSntFUOJ/z0NIrvkFsQJY3lhppQnTJBJ+vAYtIYHARByPjwwkApjibBGACPVjAVsllIx4wWZSwyJX7Q0aqhac+yOcPtWCzKhke6xZ3XCskEVzQmQwAgS0DcOYIZKMRB6smZmFVm2pTBms0v7h4BEputRbzooBodHcCs4XUBrsn78wbr3B5loJzmDJgn05qElE8rfvE9V5sDtXi/jM8MKtbL1DzIkZwlQlElVfDbaDYdlYn8BKSX6MEB0MGwT0YSsPie04r4ZJnMhmFACkP2og/zC89IfD/AmjS07AGeTKGfWTeM5zYVq4QgCFAhXXqLc/A4J2Virhq5NeJh3iypTFYxQIg/MYrRrEzCZJ4snHg6LDmYxvP3Yr4HXbMhFAQnZDoKbW9IYDSMm2AM5ZI/OFwn8VxAGptbJBNnNlHL93gk6y/C/2NSAtUGeftnU2K8PQj2hk1qLc2STY8qX5iCT9NUvIRCQ6PVK6/0zMIjZE4+dbhTDGbtnNFmtvh/HGisJxsB80akG8bOjFp7UMb5e98V9Tc9P0Zq59kbowlpwL73xnvlpNst+/Y0KyDMbKBk17iS5DrWercU8DuZPMrsHGPU7cV84TijZLZwxONmbTzYgcMXhCCZVXIpUEuA9zS6vZJph+wVQA9GY7BT/aRXyHSc56K2LyZIaAH6Q2VHpOvKZZkeH5aJ6Vmpb2gUK/YrMQEWEDphja0A6sB0sbp2x0WBHh/10Lrkv6WxewQfbh4PkKXgx1KzbL2IzlyTpUEGBoOFdLJB6GGhsw7I9GChXu9MJviRSoGpRskqsDwIrpAJoWSeIJfFbZUW8vg+xyodm+tZTFqx04iLNZfnP3ZnB869KfZXX7hO+mopE4TG5uFj94hzalDJZa0UlMGygQFCFojlrX1q3KXgB9dXDBAs662pk8TtD6n9XU9u9H0uzQ+ZHwKSlS5XRdCC45YuSwktyogxPwSrCLhwPT0/NJjXjtdicZjz5WulIAgZJxUQZKUzYfPf4/fW4cOH5A8+96jyq6iwQK7PcTnzXX4+ursvKOmnc2dfk7b2fXLvfU9La9shcTo8C2yNX/3qeXnn7V9t6KCxG/3AwbuksVFj9y43SA0+67W1Ky+z3LrrfZ37TRP20795Xn750ncV6EPPjydgEN/RfqTC/FhvQj/hy5eCH9/45r+UlpbtOU/Xklbqlp8/f0F++rPfSBi+hffef6/86df+VNr3dm4pQ6XiAbL80amAH8vnpvLOzZuBcg0UN+/eVGZeycD1GSDDos6P8n+J+oW+xG6Qu9wOBggL9rbi8mCP8gdZ7NVe8RRaiS2yUwbhygS9DIhDpspK811uRxVoskxhbLcCPcvtS+X18hlYEwDCVQ2NftlTUwDIEFLeCGPXLktkqEqa9juVCbqKQKPMQdbIAeNrPViYZrDAz6BxOAvXR1oILgDQgLRTtdeB0jQMfS0NYsGXlERQ9C8a1bk8CLYGI5abkGvJWRh0tkjQD9ms/rhYUFcuRimjNC2TkMYSgBxVBRSrAX4o9kcJABLth0yWp1+siRwK92GZy+HkpiSTsx2Fswk5MBaTyzNWvN4rH17tkVB9k3TuBWAyP38yQ/JWo8QBBBFMGQebghENah3adQBACH4wuI8TMIsvNGTkWANYJjGnXEr2L7BMMtPoRsZumQPalxHHjuGDRsCILJmp4rD6SZaKFosMAq+H+YBnyHiVJP2a6a8hCQAEHc4TAYNQzamublHOiqyZIiS1FCCy74hil9ConTJbZM/okfXBiB5/EPygX4fF48YyBhjBB6UHAEgRRcMDbdUwLy5KaD9N4bIAZpokGDUsyKDd3qYBRm+eeUd9EX3mZD2ONYSvYLisS2/RkJ2hS6fdjnPh/WvaLLwWymdpElp8xYVzrrHFrPLyLUMvaCBxgB/0ACD/BawYAFRK0gxAFs8viDsp4Ir5j+O4BYvcFjxZAL4FW7Xt2hIOBc5xTvYqrwynBmAIn5KG9noNHIFHDcGNAuSEElNp5d9hgLiYzaPlmh4m/Ya
YZNIJqQvBIwWGWrbaRqmHjIgVMjoeax1yBi+YsVlwSrAPAFlAA1Lz3enI5uZkZColUwvnlTYjMrf21Loh1bTIKNrJue52Bgi7pGmsS28OMi90SablDOC4PLs33W5NIzqFc95mx+cHBXn9NeZbX38p+MGCO9e35uEjggK87nvBdbkOC/18n/w1yszwu4BAsr6ePrY+b4Ifof/4rxbADwIVDF0OSwdCaGyu5KRWkL/Sz5MFGSyAGmSN0NVEH1MHQ/g3f6dRuv4+QRDOX88DZa30+ZfLDccolx++ztwsBUH4uglAMQEj5ofjU9ZK4Puhy2VxmeWOHd8jCKLPheNXQBBmZXvD4/Up1geLgRUWyOq5J6vqnXd/rTw4amsb5PPP/pV8+lPPKLCRQCsjHBmXDz54ZfXBSpYgkGrBdwL9Pxj33f+kGpcA4nLBdUrB3eWW24zXCX68+OK35aUXv6fYLs9+4c8BgHxZeX7o4PJmbKcyxs2dgaXMD4Ifzc2Lcge7Ye/CkxPy4i9eVKBvIBCSJ594Uu5/4P5tkVqqeIB8/AyogB8fz0nllU9GBioMkE/GcazsxfIZIMgAsfnlF9jBd7aDAaLVELxiRPP0jYQCGlDcImuk1CODYzqLW8dMXcuclQfJknIb55ljI/YawR1uh8yOtGFCUobx62SwSufAfFYYIGs5Krt3mTUBIN5DPgkNaVIJBD1y/V2SdodhOh1QheC+Ea3wT9CAskM9Se3hOJZLwKOjW2o7F71CCH4wyGQYG7sKhsIQ/gpA6mmPGIM2xWiYnslLmwcgR45FqwnlnXEg0AYpKu0M9lA+67BXMSOuReEngS+1fc5mmQkDbIFviA3m5+EoJF9sJqWLb7E0iaOlWroG+ueBD3iMTGlzTrtsksM+BKNmyKvAwB3G7bft69B8RcBEGLL7IFnVLJOunBR7ohKlLwnCjdcZExEN7LFXpZQh+NDEpDSFaiQ8QKBiXMmu6MU+6yipd9WYl1WKkFiqdVQp1kkbgJBJFN49jmblpWLw6QwTWJyjWE8whGBkHNtgxFMFSRWHxBKB9wSoWE7IYOihyXGhSAeGjTI0H0mhGD8mNdXw0IAnS3QAbAcGcAOycyhllQXzogkyYnoQQND8SOB/AuOQqhYPin4eOdIwI5entUIHAZ6LENA/1AY5NF23bJ5YYC0GJRGekqsDg8qjhTmNgI9BdlCTb++CQbt27KBeVdsnB+z3qc2PX4lIY0jLbX19h9SHLG4utecAACAASURBVNKL+TwJFssvAYAwdPBEAz80cI3rWYLvyNjCXmhACYGOUhaS5kGj7Xsex6e2CbBJU6vKES0+6gB6MZi70WthCU8nYGyPvM/6lYG7D+eSKWSHwbkG1FhbglC9gnwbWDU+S7sC4PrhCWK2OhSDxGyClFFuEoVX3YekZILb/GsaLJXT7w/IK2eHIEmizYeamHcdqJevPgaj7l0CgOx2D5AF8APHbzXwg4eYuvqlBfbSYuHSwjvZGZmM5jtBRkep+TaBAoIa5Qr9+jhVJV4uLDqWjs95N5z+sXie/w8L4Ac9OwhYVEPLn6/rIMjlww+LYX8nZPUKSiprOZN0/RTWZbBmAQh1wbfJfWq/Mjzn66XbkPltl4IglM7S2Rp6cZbjLs0NX1spP8xNOc+O0vyXjs/CbLlt6PtU+pPLLQVBCKBY8TmvxNZnYCkLpOujbnnrrTeUSTplDCuxmAFKQE3D1HxkZFB5cLS0HpUjR+4BQ8yH8137PuIyNEGfhHwjPUDWEro/SE1NgwIXKJ+VBhOW32c22/WfAy6rG6PzO2mtn7O1zKP0O650+QgaNcj8+N1v/0nN74vP/bU88ODn4VlXv6nbX8scK8vs7gzsZuYHM0fj8/ff/0DJ/mXAJn70nrvkqaeeBpCvNf9sdXYrHiDXZ7gCfmz1GVcZfyczUGGA7GT2K9ve6gwQYBhDY/XlvohSS2FUmzUFlUOtPgl4FmXot3ou5cbfDgYImRnWqvJqJFm0kbM5cC2hiv67E0eCqYFHSr06uD/08ljrvpXuf6ZqDG3ecYz48bAUXbJTTJePz6byykYzsCoAwq45ghwM3wwkVuYWTXxTWTAPzkzLnnZ04OMLhNI//Od0FWW0OCjR6IRMVbklCLCBElkmmHZ7Zk0ST/TDODwCs26fvH/xPcW22Nd8ArpqmvZuNSSc0u60+EL4QOLZnD4RLFDbUMQnM4GG5M6hKxKDxEvbwYOqSE/Zp2AdTDinA9D01SSM9KTQ8HNP/QlpboNUCuKtt89L+4FGiUC6yz5ekJqBsDLxPtQWwtg+qceyJuuk8vZQbAL4VVhNfsk2gslg1yS9lAcOZbWqahRIkYbUFYPgB+WWXB1HJGM2ic/ALxVNIkqfDxkg/AbpbADwYwYTY95QvS4878mBj7FiNzgpfwVDc4BFpI9kChE1J+UHAK8VskCkFmwEYC0hEz+mNrFmnTKQvji/KaeaG8r6YinMQPRJCw8MvnPgOeQgJaYHZaP22KqkO0mApEVmssaFcXTGRbquXbzz5vHTvhppxJiMwWsEsYCazrM7jJijz6gVRAi8GC0tyqCeoEKnv1HtB8ELXe4r6HxKgSj6OJTsuvdAM86RSRRYADzoAMv8ZHmsxy9dENNxDbRRIEfQvMAM0QESLq57zQByw/b4ilYsi0YGAV4V1Dnph58IQZogCsQegC9kzVDCrL4Nhu1xgktkvGgxCy8SGYG/B8AQrptPjYGPMycTRhyEHGSNRqelpblWpnO1MtbdBTYNQCiAUaW6kwuDbfMveRSzI/EZ6R7SQEhu3mKukoaAQ/jebondygBh8TCRiEo806dSRUPy9RbAWXhcLpYW93Upp9LlVwJBlhtXZ34sB36QhaGbthMEYaTug6kPWBKBTG5V+St9u5TBit+Oz0ce36GhB/Hyz5UMFgEWfRszkMOZ/tnfKhYI/9EvRP7m30spCLLcfrCgSjNzSljpRuWlXd3MzVZ6drCI6/HsQTe7S7FwlFm9rP8cWG7/Kq+vnAFq8z/44CPy/PMvyNXuj8BeOCNjIyOQhFkE71ce4dZ5l/c8elRb2VhSvQB+8PsgCw+Pq1fOKG8Q/eGA4OVqwa4nv79WanEvMD4+Ir3XrsBYHU0fjdcDIJQH/BDAKY3XDxy4c13eG/p35HKgCVluOgiiL8PvzvfePS2vvvpPSvaKzI+HHn4OfnCUwCTo8/Hv3eXGXy0Hlfdv3gyUY37sJtkrPbOpVApMpp8p9gel5h565BHp6ECDDbpYtzoqHiDXZ7gCfmz1GVcZf6czUGGA7PQRqGx/KzOQyxfk7NVJ+fvTlyWWWqQCtEF2/2tPHAQAAgn/HYztYIBw91i4LxfZImpCa7y10Bkg5cbha/QZSYhWE1xume18nVJWazVAL50XgZ5s1WKtbDvnXNnW9mRgVQCE0zjodkl3PCsDo6My4ZgSRxQyQhHNnDyBevIEut4lii7/ibS0+1wSRjdxDsbSIa9bvTcRhjk1OvIkqZ1MgVyNRK0wwc4nwKRolcK0WbEa/GB/MGYg03ThfLdMdV2TiLGgvC
EszQA++CakrtjR34UiviQjst86pDwuJvogW9XaAYCkTY1BlgWBhlEQRGzWGdk7//0W++gjeeZ4K4rrLgUmfPTmG2IchXfJAQ0eYPG7PmBRhfcmrybbRYWvFGSSWB7wNhVEARX4I92mAQJXAaCQ6cA4fLJdyS+98/55uev2w8obJPb8S+KvN8N8vB5S9U7JA7zwepg/kQv9kwqkoKG7B9rcZGlgYwt+IFyGviMEYRg5ADS+5qAMZyKYAhgrYxrY46o1issIgKo2IaG4xmKgHBQ9Stp8WnFCBx0IIBBEYi714BwMkInxwbCUclJNdVg36lIAQhx5Yl4IxCRJHUGQuVJbvV8GRnhUUmCtoMiAuWXyg+LHA1sEBqkETqJYo45Th0E9vVq4L0lTvURGJqHHH15gZ9RVQ9uKIwGIoKD/xZgXhYx+5CSsZLs0eawesI40mkmVt0Z6zvWpde4+/pgyiWfoYJJmBJ+V+gLmF47LubGs8kFhEOTZ66qBf4xdmd7nR99HYdOnZNNoFH8FlJ0i5KxkHDkyReTA8ZPKV4RRjF4AywnACfxu6gB2QUdHxodmxNQdFlOdS3moqLBkJRfMSd5elBa/RzGbdjqcNjMM0BvFT520kmgCMsT3dkvsRgYIC2hJfH+Vgh+Uh1pvLFd0YwFvpeJ+6XZ0EKSU7VAOLNHXIWjT8uqLH2N+TP3lv5OZUNu894URTI1nFzbjcp+QJACQ8fhr0kyQYg1R3HNIAX0e7wGZMYYl/uX/SQh4KHADQDrBCv7OICuOAIhilkCOqxrLjj/+3II8z9LNrTU/2+HZoclpHVDnAmXQCILoxuxL5135e/MywOJfe1urnLz9mFy7dlU+eP+cnL9wcdcBIEaTGazTxeaCzcvA2kbid4zd7hD3vDTp+NiATE4MSi192hAEP2iO/vobv1RgAUENvjYL0CSfn++Os2j3JzQ7pwcW32dw2fq6PXLixP1KXuv82VflzJm7sT00MTi1a7g+Pv1Hxsd6FROjA0CozvJaaS9o3D6K+aaxTT2mpjReZyadBuByEYwVDaixwPy9JtQoAQAyQ8N9an9YMO7cf1L8gTqAO5elH4NwOa5T+tPucGM/mtYNYK80d/09Pszy+4phMq3pFnstw1aW2YQMLGV+7DbZK+4ivT/OANx9/Y33Ftgfjz32xLaxPyoeIIsnWgX82IQPXWWIXZ8BnQFSbV1eynLX70RlgpUMrJCBHO5tCX4k0osASLQEDFlh1S1/azsaG5bbCRp9r+R3sXS9lRggZF/YqnxigvrEbpCH4r4tlcRauj8r/c28lDNC5zq7DehZaT8q75XPwKpPZ+y6Ncet4phhsS8lnWkUjr1J6YH0U+OhBjzg8oE5LY5EQOYy6MytB6sB2vbVVWZxuvGgCukmskN0n5DIIB6yLbOS7oVBdAiULBSaq2btYg6FxAzQYhaghX0O7IBQQEbGMxKMoFSGmvLgh0mZtSYUkyQDBondDdmoc0PSc/ZtOX6qE4BLg0SKbpkGCMPQWBawEwmDORIsyPtdMB7P9qsCexLgBygU8valLnTEa6BMrAr+HmnsI1b/COv4IMPlAgNAk+nSPEZCgRbFRhjVaWRgnnAd+lt42mxSgFGh7q9xHMV1Fs3HhsYlnYtJ4bdFaXioVc3NhGK7w16lJK3IbCFDw+HNKaBBwKIgGBKvgmGxAUUUh7Y/XM9qQiKq8YGeuyoN5jkZiGRh5puFmTIe8LMpmZ2LSDCeB/OGS4dlEP4ouXi/FFH4J4igm6MXYpNCACEHCSsajUt7UPMcgcxZAN4gXJasmiCkuLi/YciM6dJlHJmgTi3kY8jkqA7AqB2ATZu3Ua4Ve/ClACA5lBYrZDIo3bXXZYUnRmBB8oz5UScF9k+XphpLgnkBMIbgA4PHaGwGxw3A1iW+dg0sIMh15K6OS6p2VAy3tShpMX8EcmwAnrLZGRRMpiWRMcoYpD8ItNDzhPsph46o48JwTPslbYsoA3Xul1augedHLjTP8agFIEIDezA7AH5FwQzJTWlL6b4hHIfgR8/ZMRnCqXnw0FEJ1fjxD+clGCQUSKMMFhkvVsfdMp6Di03auisKIXarWe48GJKT+7WuWJUUhBESJRbTzhXs9HnoP3cbA4TgB307lAE5PmsbYX7o+1auE3mtxf3SPLGYSHkshu57wb81L5DFJU2j3bL3539/neE5JakIfszWdYiR7eAI/qCMzeRn/lixQdwodJLpkIuelaXm6KXz4O+62TmN0veP3iexOrvMAkwdf7xZARoEP/SwQK4vc+pR9ed1IMg886QUhNHXWW9+ynl2bKZcFQvMXi8Zd62LIAjYQBUQZOEwb9kvdQ0N8sADD8pLv/ilDA72AwA5L489/tiuksGiXJcP19fszDCujQnxgy25VUGJq3JBMKJz/21gRTQALLooP3nh29Lbe175d4yM9MhHl86o4j9NzM+c+Z2MjV6T3/72BwooOHjwpDiwvstVi+vqJbz/GoqvbjA/6mVf5wkFpNxz75MAIy7LW2++JD/58bek79olocE6g+Of+eB1td0TJz4t7e1HAX6sLjHA78aBgW75xx98S4YGLy7sVnhqCvdGEcwlJf/wvX8vdofW0MH5ffazfyL33POE2v6Vy+9IIhnX9hdzWikOHbpPvvCHfyGtLZ2bLo81M5PBd3IE+fJXfEdWOgjb+N7NwvxgSnIzWbDU35KhoQHF/rgP33fbxf7QD0nFA0TkVgc/TLugaWsbvyJu2U3xuvtJZoBAhAINCauzW2/ZE+ATsONLn6vZhFQaJqNBjnbUyJ+C7ZGDb6seAdSnmmrKsyK2My1bzQAha8NRWPTXLd03+lxEzZclj5rnWmMleSivoUVGPgH+GMxZujABtZwMtHWubxq2zCvJrDVfleV2ZwZWBUA47YHJsERGP8JvAdyQW2VCYQZxMUTmpA4NA/ZZdPniKtPcARPs2SkxuFrk/vY2ZahtzkZkzo6ifsIgUSe/lCBnhU5qnwUGMm7IEqDRL2yZEp2ApksFZfBALqMTYjxSI+OZKRmdhqQD8AE92I/oaPVL5mISUkNh8blcMgzAhMbqBD8mJ2Ay7AJ7pM4v3mqtuDsCtJfASVc6L3mjW4rBFnF2+GXm/EWAKuiKdEK6KWeSZLpHSXX5ImEZRpFl0hgTGkWrYrsFxt65UVWkpyG7zV+A5JWgeD+ifDbQbKixKzATGzSpYyjsG1tYAJkUE9gCeRT7jdM+8bdjTDBFPIW9YkAtbabvmmRRsE8WtKKmBQABzb+BQyigQJmxp8ISAzBA5oTdRqPzxYv61ATyhTDWuuBHkcK4AZmJwagZxtfXJsDCAGuBkQLokZoLgW3TLnM+5PTiBfU6/S7y/Wfk/diMHDn+afWaYQTbx89JrEOJKUpNMSymjLyDnzqDA82UStKKZuN8TQ8CQNbqOjBtyJ4YlqFrIypv+/e0iDVikL5El2JX6EbzxpgPBu51OLPA+PCYxA1JLguYKpTW+vDFHpnqMIu3pUOTxEI6KHVVdGUBsMwpZomoOmuVYtFwzkHPYTUVAl48LgQ/G
DNhsImsyBHkzXTAiiBNKmaXAgALgkeBWZsE93jFEKgRpxcFDTBpCEwRpBntTks/wKrq3JiEwzG5885TcqgFx62hXhIAbdxGn8RGcVGRUdnvrZWRubVprKvJbWEU0ZnKyIMOel2ob4HdA4DsJgZIKfhRDcm9Gy2kL2WArLe4X3rclpN80pepxndCYAn4Mfz1fy3hp7+qwI/SuegKMTQM5weJf5PpYPbcLxe/Ctyne7/sG31GDd34nf9N/STwkXnwGblaPyfxmoAcn9LkAfkexy6V3VErIPTtpO58RP1dDgSZOt4BlFErmG40P9w+PTt0uSpt63vWVIjVll35/7m5KowP1hj8D8gC4T8P2IzM2dJjvPJIlXfXkwH6fRw9elTa2vcpM/S3P3gXhf1eeFwcWc8wW75sqDak7j9GR/q2BADhZ8sDeScanLvdkJ1bEgRCCT4QjKAnxtkzryiAQAcPmpoPy5NPfkWtRbkqsjneeP3nAEBy8Aw5oFged9/9sGJw8D3+pJeIF9ukoXgblvmj5/5KsUzeg9n66d/8QPlucHwyNRh33/OkfP6ZbyhAZS0G5JS2IgATj00JQQ+yTxg0XK8DW4NBxgq9Rxh+H5ovADTo4fHWwSyaHnB2VTzmT45R7ie9T7aiKEIgmZJgZM2cPLFPlj6IL0y28su2ZmAp82M3yl7pCZmcnFTfa2E07uzbexCyf5/aNvaHPodb3QPkVgc/eB6Qte7A9bYLDYEMfreVURLc1s9xZWObnwF2n8fj2nXU42YL4CcnqqurxeeDpHWi5xMN8nxyjtjG9oTPXGx+4T2dH1K5S4NNSa2Qu2rQdNCve3s3NH9uNQOEbAwyM5YW7slu2KhHxtIc63/vlD+Gav5cO4az3PQXXr+ZmC6r7kxlgbIZWBMAsgfFLZf1uPizOfy0SC3qXHk8bJomozIOYINhgS57//iQtDQ1ihe+AtMOSCbhdW8CaAmeh/MBl/hxcqZIKYKpnwPsDzwxq2XmCl4U9GslimLy0FQK0lhJScQg72TUUFxKYFnjs+ItoJCM4Qh2zCsxoXsfXf4AZqLJpBisBfE1AXjJDKniQ2kY4KXhrl18WE5Nd+Ntv5jQyVtdEwIbwAFAJyNRS0J5O8TNmCy+TO3wc/CatcLeUG5CUhEYWrvhi5KvRiEZy8LwPZ/PiM/aAmPzHEAH8hvwEA/ZmTYwPYowz56FYXsTjMHdkLiKAAAxwNskMsa8gQEyMSapNHJDPw50NEbAwsjDYN1kYxHBKjZXQOE+cTA7LJF+CWObjJBd+6Qnh4bF1tYBxkyb8gZJo/gwnktLzgbQil2XkLSiMT1vYRPjU2JEEb/96EnFVhgZm1Am6IwcZJqmE37xQbrsXKRPmv11uNt1K9P4VAqnSaqoAKvUxChACK04mRhPIadW8lekZ5jHgWGVdHE+XzCEHwLwQ3DJ7saRBsuGeaMZfSylgTdDXd0KXNElxK50E6Syy2XktQ6AEZkiXoNT+cTItWF5tyovBwA+FEFzofQZPWESZM7MF/AJunhQ6Kc5O5kcA9Mp8boOSzE5pnw9NCN5GD5POwG0VM37fVTJAKTIyMahLw3ZG3EARh7IjqV5LPvBSIGfRxTFoQPtbtl3pF5OPHpCSV8NYl/OnjsruVSD3HYYbJOJcYkb4H0D4IljEYRy+DXgYT5BO/ZjGsbnr54ZknM9iyCVzWqSllqnfPpEs3ic16PcOzXR3cIAIfiRSECyLTksBD9osr0WGZeV8lbaqZLL43M672mxFjP1cuOWk3zi91ahOArw493rmB/LgR+l4/IBl3PkDRl/T/nqxeJ8RuK+Pnkf4+4fSSvgg9JVZIZceqBFpp37AVgekBGAATQWL2WhLPfAzPHTdz0qM82tavMLclhggriLT8vMPX+sCrPJ9IDy/NhIfriNBbmqec8OghSbFezacYAxJ5DAIgCiGEJyoAKCbFaClxmnpWWP3HfvHcDuP5Su81elp7tHDh2C3OQ26OMvM6WPvby/cx9YIK/I5cvvrsv/4mMDlXmBD3tBAPN/9mf/WvmzecA2sdvR0FFymeEyjQ0t8uyzfyHHjz8AhsQFdU9irXbInj37hWyNpsZWGJXnpRpyV11d76r3O/edxL2YXbGZHn7ky2AxNMjoaB/YLGlpaOiA5BRBxSp8zqvVflGC6r77P6u8PqbCo2q2HgCDrW1HpKmpU7FF1vqdSfB1794j8pd/BXZaiYcJx1wqYaWnhdvnvh+/7X5phI8bpbModVW6jr6sPgb/1iWwNhuspITYEPzpjMZxOXbs81LFqmEldiwD5Zgfu1H2Sk8Q5a8+PKN9r7EDkXJ/283+uNU9QCrgh3Y2Op0ufIc3ye9euVTW52nHPtSVDW96BsbH+8VsSUo9GLafpGCDUj0aE1OoDxHk2Uo27icpbzfjvkxODOO5NyztHXd+7L6Lt8b0AUlPz17nd2qFEboJdcaqtRpgbFFitoMBQimopQbh/HvasH6PjN3oj0FT8q3o480ZkmCAaBL7pYffakBT9CeA6bJFp/RNMeyaABAyINjpXkvGBhTPGTVzXqmGnI5vXDPbjAIYSTpc8H0IKB+EDDofhwF+cN0qqwdyRF1gg2hyQgRRPPCHCE8DQACAUouuWZcNVtIxG4zOs5JE8dh2Ap20XVrnvHV0VprBGsjlCmIupOmLrsKczsiMRZOXUKBISVB+IJ3SfEr4ss3sFFuxWdLRS0K5K7geyBmuAqAkM4AvReybKZGUDDoGCcqkxouShpQBw6G8HGCObQOFDI3+LoASJgAMBBS4LAv8M9g/O4AFRgHgApe/ionGLVpHhQN+KKUxDiRnqjsmE6ND6uXW/Z0AfFKSCzvECkDFTYNtABejAF3of8HfCcyYUKSQQCO6GWYUsyXB1wAUePytahwzCp80PiHUozFUUNTG+yzux7JxcXsbFPhBmSYaeeuRhEZ4AIyW6uIMir3afhMEsuTIzQGTJTsCL4v90rhvv+SDkLGBVjcBirHuKQWuMJTBPdZp8htlCOygZAEFUUgHFXzIcRT7VAsQCPONcZ+wD9ynEeSuMDWhwLRaL+dar5mL472PwNThHGkqn42bpIFm8AiCHOmEW6p4OgJwiqfH1VgBkxuyU/WSRufSFCSd8rE+BWbE0t0yBmBIASrzQc8SNdYQLn0ugHkFuJsgpw7lq4KzHEyfkapZSU5pUhxtAUgRHOiULJgj3L7G3nFKi90qF3uvgciUVqwPA8APSrERBssMAgxx2MUj15+bC5PY5l9SuPgT/Hjx7Z7rtvzQcXT7HmnAPHdH7AYGCKX/UilI2Gwi+MHsLhTcACbEwGzj99JGivulR2opCJLJXJMHXrm6AH7oTI1yzI9yR5xzZDGVBWXKYrHIbzZ3KjBiwN0FizON/XHl1H4FfgSDxxbkn/R633LAh749/X16kFCOi/4hZJYQWOl8e5+8eeiSAj4oObbR/OjbKPXs0D1cyu33el9jnvLAoZkj+oAwPxUQZL1ZXP/yZAkeOnxU3XzSiLunF919szmcg7sDwOUeBdBUcfLESfnJT9+UK1fO
wQh8bSyItWaD8lXtbfsXFi/3eeP5GYIUJT0ybjt+n8oRwwpmhA5K8OeRI3cp4IHBgoEu5VYTrJeHHnp2wf+D71HKSv8O47rlxteXWwvro3R/uQ8ul08xq9YTXC/gr1H/1hNb8dA5OjYoXZc/AEtpvzQ179lVoNx6cvNJWfZmYn4w55S/evW1d9T3GuWvKPfHDubtjFvZA6QCfiyeabz/27OnSQJoULt65cy6wOztPF8r27qxDNAjkA0MtSE3jnfzjQ22y9bmOdzY2ChtkD+6cOHNLfP92mW7fctNh81y9Iezor7E5qOlzVBzaCw41zMhL78/LOEkWMHz8n4B+OR+5lSLdDZv7zV26QFaOt+l72/G3yzYL40b8ciYKqIWUHVEbIXrc7eUZbJ0m1v593Im7xvdZs4Cr+V8vGxtzF1sQn3UjxZprV660W1U1tu5DKwKgPAhtqUBD3PBqMwA7CDTgmBDgzslU6MJiUIyydlWL7cf3QsQgA/HE9JxWxAatqMSHYZkEj5zDegsT1HSxJlTrIg41g8FAvB4gIxDcAZFcD5Ym8VcQ5YDuu6jY3IVN//JR7UPbGTPhFzEg7F9iJ3+dsk02eTUTBPm0iNGgC2z8NjI5+Ef4rdjbh1izkCWCgVGD7xHUPOVFDocU0UDWA0w574MCS6AMvQSCYEBcWUwKvYMtp1DB2UttO+B9TGqbFMAOKKSrs7IXjhEDNPAHf/oZZJEsZ3Bwn4KxvDDZzQZKbJPfAAJZufGVBGfAMlkPcCjvimAMSgbonBDsILhzFjEh7y46/crcIFBoGPWh4K8MyVRFP8Nk0Uxz4Da552SAsALBST5gFZjXOqB5ascyuy9NGbz+DICuBDrh4wX6vAEDxiU/wr3QHbrcAP0v89LExg4VzMD2IbmAcJl4pC4qm3ulDnsM51d8hkAQ1XYl1q7tBvuV+Mk8J7gAjKdBIsAQJBtb5P6naBGHgblGbBoGA4APwpgAZvDFWiW5BzM0eGpMREpKPCJ0QQT8Svwc4mDCeRB7cIQDEGyrFVsKZwPkMAanouLaS4BpkZepiFDVcT+85iNhWCwZIHpu92DLyZ4oECfLz0HcASgzuxsXp1LtnhBhgDGDHaPqdxRwmxMINGGIKtG5k3NPZgLARIycZJXu6SIc9lpdSuZMyeOV9JoEyf8Ryag4lVF+bF0WO0DZbossbhiyNQ2WZW0FyXffC04Byg9hu2QPeTDcmQE5Vkp3eEwoXOXBuh7aq8H43gTwPd2S+w0A2SrwA89vzMzYXwvpTYF/NDHZPc0jdAbuq5AOfCyGAa6FJhQCn7k6yG3t46DrBdVKffEgqvR2C7jACXOfulpgBRdaqRWY43k8Z4e5QqxK22S8yYIQnCGYX/1BfUz2XNB9oSnJPipR5SfyEY7tXljyTmVenZMzwNPK81rre/pYBFBEJE9ajWCIIVCozoe6y0Cr3W7t/JyBDqa8FDbjGs/zdAHBwdlCpJJfNDdLcHO/1N3n5LLXZflvfdOK5YGGRebGWv9juyWgwAAIABJREFUrPEcNBodCqhjLF2Pvjz8p4f+Ps9tghzl3ivdD45vMi2OX24b69nvpfNby7obWWej3ynLzScCyVP6qbCT9pGHvojvTO1ecrnlK69vXQbKMT92s+wVM0FAbmhwQK70XlByHseP3wF51Tt25Dy6FT1AKuDHxz+PBHHvuP2wvPy7D5T/02Zfwz6+xcor25kBPut0d1+QmdwwGjYewn3KzhaCt2Lfg8Gg3Hf3CXiL/U4GBrshK3hsw88TWzG/ypg3lgHlHYfjSrnXp568QzUfLY38XFFGp9Ly+wtDMqUUQ7TY2+SVuw9rsvNL19nOv7eiGad0/vZCnVh17+JN2DGyU6krM1G8IE1Vd13HLKFnxnYH5yNLlN03Yw75fE6yRdR+ywSBnooRepnE3EQvrQqAcF+CrXgQNuyVy9kP4J0xh4JUFkbYNTINo+m6gwelONonvTBKdx12wE+jR8kS0Q+j5nBI/U4PB0vErLwsLIUZ2YuO4jh0nGlyzSp9R8deKcwboHN7bodFZt97Qy7mAKAg6hva5Kz0SRFm0yr6RC4F+mEIjt8XntvZ3QhWQzU+BUdYzCU3g8Gf09Is/TJcRLEbRRO6qo8ARBm7mJIwCuxTLR65hk5+UkuarFWSCgbECY8Qe0O7OEZ6weZAFz/YGYyAAf4RAArIGHGi4G9yzkrY3QRJBRgGpybFUAdZlNZ6MSQDMjtZJfbxvNQMQNbKDdPiRi+Wga9FLeY4o0lLkQmTiGi99wawHGCNIlGwT7x+r1T5wIbpm5R0X05sdZqRsNomAAkCHKkEzOnr6AUCkAdmqE4UMax1NrFccorXCgkozDM1BnaEd04BPnuO3ilFSFt4fUbMJy/10Er80FjyrWGKyAS0sSOmLEzqx+GtUS1zZOcgFxOpHulDcTUI43vOgVJYMemBzAcYIZaMAjVoDq4fEBq/m2yNCvSIACAhu6XvzREYq7sUoEb2D5kpihWCtQhKpIfikuuN0hZGXJ56ACdm+G741fnjBiAUBFiFA6kFaBrTc9D4xl9knszkDPA+AQgRGVNyZlKrFR8ag51SzPZDWm1CfCaTFD15IdtlnMyTeWAkdW1U8jW48cM/gh9K8gvrx2CqLtj3FIbzWEwKTAov8c/QJbNIg5lB/sdiYU02jB4xkMCq9qIwpKgqOx8ep0WeuqcNRuiQN5sPM5gyZF/xvd0SO8kAoedEJjOumB8uF+VVatcs4bJa/nQ/EY6dy4JFhO+NzfKM4A0UPT9cP/vbBb7RUvBjtfmt9j4NzSkFRr8Psj+8Jebmq6273PssXLKASk8SHQShp8jjr14SW98LYJrBZ6eMZ8ly4y19XS+MEsTRmCCtQgCEAFQWbD+yCDajEMrt6CBRTHoXjOkrIMjSI7I5f1PWoKO9SQEgly9dlImJyd0FgAB48+H6+vjjj8s/fv/v5e23fiGWBz+vGBM7FcuBBMu9rs9ztfe53FqW2an93o7tJsGIfe/d0/AvOQvZsS+ABU2psPXAzdsxy1tnG0uZH7tZ9ko/KgV8iLq7e2WwdwRSdXY5fvIIPH5qd+Q8utU8QCrgR/nvBnpu3X7ypFy4dE1dw+wPP6eYdrf69335bN1crxL8YOH4HXh4HdgfkLtO3QU5oN3TCLdZ2WTDzPHbjssHZz6QD94/jWc6TwXI26zk7vA4fKYeHulXx3VPczV8604tew773Q54tfol6l/0zm1A/c2B+sdOx3bcKy7HjrBBDsWEZ3vlebHG0JedMoAFYgALpMQkPGEYkpwZGjRa7/UaR7yxxTgfhwH15oW6742Np6/NcSkRRqbMUvkwLrOR3G3OzCqjbEYG1gSAmA1+JfvjbwrANQN14irItgDEqG29QxlEn/sdQIbYJfEkoRF9771SZWmWgmNQmYG7sknpheE115mEQfYk1s+bYYqOnwRHfL5OMYY6pJi2yWAmJrPxqPhgxHXMuEc+/O0rylDztm92yD6YcjLIxKDuNSrN1+1/NDoh/UD6I0BHDGNxGQGbozQ0ISOAKYaAjDX
PSm0fUF82RB5tU4u955lQP7tcRI8xSyh9NZMNEjwogwpAIXCimfZKk9YlASgAJr4paTjkFGN/SvKOGRkC+JmyA3AAe8LdDrbLG+ck9kFEWm/bpwAKztpu9Iqzpl7A2pNClVtq93oAEIGJQRATf1Pqywa2g9kIWTEsP2eHDI1dq/ybEmlxQq4JlXowQgBQ4H16i+hRKHgUZX4cYExamY+DuYCxfJYmSFINiwfG4gFPC3T9EzKEXb7XmldMEbiRkKwDsCApLoA7RoAfBCdoFk9NKMp8EbwgS4VBgIXgDUEWJRtGjw+E2zQjb0P+ymEIK+YE2twVCOK3uaVpn00BYxEUC0N++G/Q2J1jOfbKO2dPKzkwKzrKfUCBwpFxAD/wBoH2eN1enzTAePUxh0mBHf25ESVHpsuCqUEQZN1wFgnkSMbTak5VAD36AX5Mj81hLEiWwVvE4Q6BDZSV/pG4fATZMzOWsQJMcrqzcvXCqLQcOSnVcxaJGiHBZTeD4aIdFzs6aH3TbpmBFw5ZLxYcMDJAxofg9ZI1AbgZA9gRws3VITFAZiycg7xZefBYn/K2/uTNbZ0fmLVPBwdxbsxrFm3HBXitO7tTDBA+EMTmPTm2CvzQJZg8kF2zoGt6M4rvzKsTxTeCH/TSGHlEMyvv/8ozMnD0cXxu6yFtV9zwtniKUNuekmCp+CW55+dvqENJEGTGMit+5O1GmA4cnw/UUWdA+p58XKKRLsUwIYOF/xg3AoKoARA8xwmCeHNNOM5DWyJXxW55esVUQBA961vzk2yP9o4OkdMvQwJrCGbjIzD9vm1rNrbBUXm+7du3T57+3LPy/X98QX57+nlo+j8iNBBfqy/GBjddWW2bMsBrBmWvyPwg+PHUZ5+SO+64fVfJsW1TKnbFZm5G5oeeuLn8rGKMDQ0NoBHKLMePHN12+SvO5VbzAKmAHyt/dNv3dsoTjz0oP/7xj+Tl3/5AHgII4vMGN3w/ufLWKu9uRwZ08OPVV36sCsdPP/VZ3BvvDqnmrdj/2rp6eeqpp+V7f/8Dee33P5L7H0CTAjzSNuv5ayvmXBlz5Qzo4AePJ70/nnoK4GwZ9gdHodH5sQ6/NNYcR6F/senXisbWAJRidjq2kgFCdoSjsPngQLmcESiIFq6hXrAor19uuc1+bSUGyI2CFGmZELJaSkEezp+AiNfYIlNg16Cautm7VBlvGzKwJgAEvuWQGpqRPAreURS8GZNgf7CZMRVrhuxPA06OIXhyTOJ1AhvTCwbV4ZkijH7zMkxvBXTFE0ThGJ5kAcu1SSxvFjekApIAPsLJrGSnZqVqj0f23n9Ymj+6KpHJKbnvUKvsddUoQGUffNGLAE2qvVkZnoiqgnoVGBwmmHvHYZjtAcMkfrhaPurVuu4phTUzq1Gyqs12mTXhRCUKA74UdS8//OU1ue0zkC3yaYX9ocg1tX9TNQblo6EHfx/FlywBFP3nrwikZGvlw/mFLNXz0kLoWnZBzokQaPKUT+am59MM6S9vA5gagaCYExEpxuE54YVNOIrzCUhXzdKAnQV5rjk9ju5kgEFYh3BL3lKlwAmGAYbrgmJkAUbdTscimp2EzBjEnSQD03h/HQGgXgkZ/TID2aN47LwCWOZghE1ZpioT4J3RAQlERgAkgPlAs9L4NMAniwynzABlmsUO2TFKSeUB6FD4X5mEA4Cgr4Z1VgMvbHurxRVrVCwLwi1DOP4hTJOeJzzezim3kpIie6IxFFCm4AJJqLmEU0YTmhl3+OxvpWCMK4DFGfIpcKT38rDMIS/uVo31Y8sPiwXb5LaruqtQLMV+wF8DmjxSCDbi+BHNAvADuEr3T2GuCNAQAKpvdWiAUBpSYi7Ik+GUIIhDm3OiJoo1wgA7JzqknQP80+R1IpftAO3cKLSdBzsAhvfo7DWnpiGDRR8aSKrNjkoRIJStOqSYK9N5u/T0QGassQnbAYvn6lsy+5nntPF38P8svHUuD0Skf5QzX4z6oAM3ByGxQlZsN8ROMEDI/CgFPzazc19nfih/CGjoE1xJZheN6G8k53yQcb//8gL4oY918atPiqn+YXzUwsqgm+bfG2Wb6OAHmStusPJsfVfngYln5PeHhxSQcyNMGd78peAVpOfHivwQXLH1aWbr9AZh3CgIQpClUMB1h3I98F5hbLZnB7fB4nYFBLmRs3r1dQnyt7S2LviATExQgrGwbPfX6iNuzRLsPjx5+x3KW+PFF38uL7/8Q+lrPQwPk7uVpjoZSJW4+TLAB7wo2J4XL7wtV65+gO/0gmJ+EPxg13QldiYDS5kfu132qjRLMzMzSs4vHotALbcJXdkHdqQoeSt5gFTAj9U/p2ycOgV2AOOFn/5Sfv2r/wp5x6dUFz3vdeZ7qFYfqLLEjmZAZ+3QL6EHjUWvvf5TBX780XNflL3w9txNTXCbnSiew0ePHZM/wcAEQU7/5nm5594nr2tGqZzHm531zR9PP4f5vH6t77K8+cZLSnL0T776nDq+yzGYeG57KDmPf0tjN/B0t/KzRxaDraq8tJ1iNWwi6YtAAQGD9bBJlh6PjfyttlfmQG7G/tEHJJEfKmuEvhyrZiP7UFln+zOwJgBEl/XBeY1u96z0WoeVp0LcVQVmx7Ty/piWJhmBUfpsPirOGXQLo/id7kkrfwQG/2bQP8FbmFMYhBEPkN7gPrEkZiSCrn1DRivMGlCdjiQNYt0HlgH8CVgsvzQ1AsNtFA1np8TZjE5eFPLHPuqC5BP8GgCe6DGW96NgDV36eJ96yQbPEfUThUfG/Z0Pi8UBP5O+a/LPI27FMNnrPCZ/8MC96v0opLkmC/3qd7JWGARaOP9Yqlpa4NkRh2/FG5Cgum2OIAd8QQA4RA346I8NqOVpAC5yTflgXEx/qMy7O+8+hMKbUYqgXzIsYDmYPB4JVJOpQeYC+GKzuJlsbsUcomKzZKU6jtfBYrBjOUopQYBLunr7YMw9LK2gIU87fHJ5OKxYOaVB/d7xsV48POXFDpN4H9btH9FM6D1gpdCTwgj/ioHJMOStUOSPGWVvQwggVb+8ee5DJee1z3tSsScKsSGwUGbmmQ5ZHM8oQCaMAfAmA7N0YwS/5yfFRLAD8lHZuR6ZiwBUwDZTQ9QUI1tHm91kpgZSXTgWqPkQmGBwjnOQ1zp1z+eQ524xwsjcYoLGe3BWUmCpADtTXiEEX4IWHEMgUR5rncxC7oqIDte3QXs70n8VshMtiuGg/DnmDdlJ4rFPoyoJnxEHZK3IGoknxlReTVkDWCYRnANJMTbukWguJ3O1+BZF/SIYNStQYwjSKrPwWrFYJsUP4COPLr1xdHxaqh0K4Ji1ZiGtZYV02Di6Pq0A8kZlpCsqsfCwAkqmei8DfDmsfEl2OuLpGXn1w2H59Xt9kAzTGFLV6IC4fW+ttNR5AIDsjsLNdjNAtgr80JkTmQx8bAB+UD7K5dCYbDcKgOhjLwU/KHt1+fDDUqg5pUCJ6uqAAkA05sn6QZClkmABMLRE/o+FU5lAAoERxkZAEIJDicTggtk880P/nMRtT0rc0Cny/H9QYAtBEM
PAJWWYTs+QjTBOmLNCYZ4ZSHN1gELZ3NiWeHboTJCMSZNT22h+dvo7Y7dun/4KzZAZCqLRYAoMv6HhIXyPp2CgPd+EsIsmrj98+/0+eeON1yEn0gMw5CKYlz5xOP1SzetaJW6aDMzkIBGaiuA+IqqAj9vv3CcPPXA3ALm2BeZHIgE+6OgIjq9L6uvRaFGRw9rS41uO+XEzyF6VJoXnS3d/r3opADZ7fcPOnTe3ggdIBfxY+0eSQL4Ogvzqn1+VX//6eygeH1a+IMFASAH8RuqwV2JXZmAOBcIMnn1HR/rk8uV3ZSo8CoDVp5gfZPjw+sQGEioCfFKvVaUgyC9eOq2aUboaOmT//hMwR99TOYd35Zm7OCmew7OzOTBuB6Sr64zy/Ghp8cpnn3xODh06qMAPNtNRSnLpeaz8tSZT0g0Z9ZlZ1ue0cEL6vhMN17Ulqhg7kYatZIDo+1Mq4VQq6eQ1wBIAjVgbAS0IDuTyaIpUbdp4bt8B+StudyUGyI3sn547+oCUk8GiAXyT8ZT0wj4gO6M12u/E+VPZ5sYysOodCzuMR3pHJRGLihsGWUV0D6RyMH1G/TkGFgJv12muXQtT8/FwWGohnxSryYo561brFB2tYg3kJQufBTEHUTRHx3w15JHQ/eiD3FN2DJ30mQklfTUN9sdMblqKhgLq3Jp2UNZvlEH4OtCvwSRGmGm7xQBpp9QYiuwYL55zSWwOfhMo4pMtMZyfkaItKVOTGiXJPg1WCS78s5BHYryPf7d37pFr4T4UsudQ7HcomSXqt7MzgpHDPMgs8TTAqwSsE/u0XYwAUvajdpmDb8kcCv33tnSggNEi3niPVHntEpwDnRLeI1yeQIXHkZEBMDTklR6wTK5Irh0wKxgzrjwYAVwkHxCzNwdwwYGcdKvtMSxmAClWi5z0gssBZOPitV502EzgIfpTEkNnWAiyWNEEcoP3vFjWCTCDklrmeaCn2oxxfF5IXNUAMIAjBz0oalrk1CGAO/0jkoFkU2Nor1wZmURhvlkBKQREuFy795D8/+y9949k6XUleMN7HxkZ6W1Vlu2qNtXdbMNms0cURVEiKa00I2hmFwJXwACL+WHnhwUG8wcsML8uIGAxgBZYzGpHszIQjQw5bJHdNO3Y3eVdehsZmeG933O+l5GVlZ0+syqzqt7tio6MiPe+9737Xph3zz3n2DqtCmiiCbjzfkaxRwie9KLQ5Le0oWIwOXwJsD2mMeO4rKZyMjh2Wh2x0b4OmUeR1N+sqeNBmkQlo9Hh8vlJJZtF9gXgMcW6oIeGc+Qs2Dt59Xdmzbj83PDL4jbCvwQAhGKe4LXp7F0FsnT2RiFj5VNMoioOGYEYGodMJ1dkEIb1TgAmS8iPOzYtDZiq06xdSYwhSqACFiGTFgLQUgKI5xosiKNswjE1oFjRCaktzRCrDD8SC4ANdy88AjLwhYktgdUCbxVIiCHz8KOxSdFrlFyWDBxIfAU0YIvLMe/O3lEFfiwtrEj3GST/hESpXJcsgJBqTaOB8sdAqQag6gTF42SA8D2fK8yCcZVWzIyjZH5ozIaF9eI+WQHsmGt/zhwm5VV4/vg/e/8h5gfBj/i/+0+S73YpRU56dnB7YTmtgSBkoAi7SvdmKr4VMCTFB+wozp8m6AknpProa1IvKObDXuV9tjKb55wZBIsW3/mOlqI1EITyXozc7/ypZF98e98gCLuH2uNzHLvdidujkatqM0F4Pqk5HyA/akU9ts2AD1KZLBTGYvOSwG+PXC53IgEQ7gAvztiNHgUb8sUXpuQ2mjfmFxakUFhG08iuP8O2zYH+wuPPgNMBX7ewBwyey3Lm7JiSOWOBcGPU8J2axm9FdWGmxyPPwJPM/GgnZ3Z2Xvl/kBV25gyuLzxaA9YjT94WG3jaPUB08GOLg77LU/yMe+ONN6QX/lsffvyRAvJ/9KObai07GsL0ONkZyEJ1wopjyKLxG69/SV588QXpiGgeQ7xWmUWtwYRGvSeJNbffjPN32KVLlySCGsGvfvkBJAdvAwiZ0M/h/SbymJYvo1bH6IjYleH5S1deeqjBZG5uVmZmZuX8+XMSQk2yHVV4txL8+PN/vIm6pWZO4YDiBRtmv/vNi8cOgDxK0JG/Qe1K4P5BbARDyGIIVM/IiunavkEQMqHTrRmxGlHXA/hB+avjAAK2Y4C099haDaJKdzCAgvtDH5CtGCYc39vqVflbkk8fyrH+4ORnYNerM3baRkfHUEBOS6VSlrodnf6tDvhU1CQSHhMbivWJiR8r8OPM4KDa41Q6J9myBpgIJIfKsRlJr65CL94AoAS1f1wr0jC7BSZIDBJC9YVuSEPBWwHgRzaTlB4YhnuMDzTkaMRtCECKCXBBIYWTGJV2jymrwBCoJqmg0XfR2SGdkIrK8YseBc3Y2hDJTBr0tzEJDnthbJ2UxeWAkjXKZydQrPNILl+W6du3pZjJSLxRBXjQBVZGRm5MTEorNyPuoS50/kPKKg6texTisYcSHUR3MiItI8AisnK7DobCArT28TlTzUzJCurteTBh2sH9ysE7IgNpKwIW+CUC49a4YnowpgAAuVJJMQ94JYXC3FQyIUNB7QO8kLbJneVbYoeXCsEPeqRwrgwzzBLpLWLFceD4babBOXwBlMsVFHWxfygMFQA2BAZs4gJzg4weWnYsxvLyeXlR+nqvqOeiAF0oV2U3miSP45M3AFXAP4JLFeSF242DwnGjuAJPEqcyM7e7usQ/d08sS0UAElNy9fa4DJ9+QZqQrip5gQvTewSMFTI14DuswKy+IE24o2oOlOMyAPxZAcjReToo0XSnxIIwZAL4wSBQwQhiLtxja9gC8EM7sJRRy+Ax76XfISsVSK3NzcCkDx4ma6DJ4vhd5WVCvw6yTgxotj3V9zw6CcAcAQ+JrxkyUWWOzli5vaSYHIyaNQ6aUlRMAOSq8H3hVnleur0GsQAETOI8Y5gg35PEOUrj+MpKTKJgk/R0haSK7lANNNTYLmrhYww3/FheuwhjeoBaG2Okyyd87aTE42CAkA1Qgk9LG/zw+fYODOwlT1sV9wkMkPFwmDCZmiialiR07VcS+T/+t3WPDPp+EBhIP/8mWFK3HtqEDZ8RYd9pta97lXzaCvwwg8G2VWws8tP7Yi8gyHb54fPtICi9FQgClT0V3Ne9gi1c/iEGyNoYj1quijk7SH7WpqffbZMBXjC48d3m8mgSUktLSwAx2VlwsoNFpNOQtqHkRLlcwm+VLC46HpzzJ3v2+uyYATN+E3vANCILabsLVx+ahc6cOaOAke2W0bN5+Axsxfx4Ugt48/MAcnGdxLhw4YL4IPN3HPG0e4Do4MfBzyp+lg2PjoJt3yevXJmTyalpNOZlIGF6Mq5xDr5nT/+agcBZCQSDMnb6lPJK2CwXRPDDvgnIfxqzwnOYHnLf/r3vQAb9JVUwjy/H9XP4CTjYrDf29MCRd6BfNRNtbjzh+cua11bBZk+CH2wAZVSqJrCvzWgGXVMF2Gqlx/Tco2SAOOFRYTc+8AnevEtkMXSZLkuhGd+3lwWBBxqhr8pdZ
XyuvD9O2OXMUchUUdYrb1iGFYSm4LGRDULj9YBxSFLmO8cC/mw+nvrjvWdgVwCEBakkwA8rgIWZBRThUVjOlG+I3dsrSRTBPfBT6L94Bd1wnZBAgd4QwlMsydyda4oBQnkhQmeZIgrxMLYuw3zWB/PuVXx4RbrQDZ2Zl86zUcVgKI0voViflFLZCXMZ7V1kh5yUAW8sWdJ+YBnwuAggpOtSF9gPKFID16P5BCW5DDA6Wl5eFTuK2XmjXRwwVV8qoqCNefWMRdDpXhZDyilLzpT0owARCnXIcm1ckjD4/cn7P5MRgAYGu0cmJ+8qBkM78oq9UpP7ABhMAW1enWCasIDPgv4M2SkACfJRs4z5B1GITynWSN6gaT+54VFi9IKFApAjC0BI2U1U/VKFUbkDjIiwtUstX0GHKD0uSis2mc7Mys3JKTk/PKSK+s6kDcyblFibiwBB0CENpobBNSg2PJdHN7gb+8Cu/jw8J8gCyRujsrzwcwU+kDVCJgdx4GmAABZ2ncI/g4wGX7BLHP4KPDoo9TWupL54UpBpwb8tGbAgokHkGeAACrg++olgjJkblHaC2T1YPJaCBn4sZw1y6blXkUN0oFNiCo9zENZiTgkaCWhiXkiUMQL+DmXEnkrCD2RNMk3uAdm3ZZADyuygQA3fEJNXM3Jvzq0oG/oE2DlmI3xl0F1phXt7FfJklFPrgXcIe62rtqzEP/tAre/3Q6YCMl40Tuc4PDb0IJkGYEODeOY2CgZQrJYQhwf3uYI0LVkAfWCVwOvD4gTjI9WURsAoHsBc5gYSz+zAd8Qd9kprFXkAMuyAVFk1jnO7saJAYrKkVvPTiuFSQ/FZ2381pWMNB77sr5yNKDOwjcEfATQJOynxOBggBD8oDcV4XOAHt3VY0z2CH90/+VvxrbEiOOZG8GMrTwFuk2ACUEK1vwRBms0edMv3bTmfrcCPnSSn+Np+ivzbgR9qcpuC817+2h9oz25gghwEBNnMAGlvqg2CPAq5Km5zv/nZnAP98RczwAsGSkoFO8LqxQyaBzI57bvii0ufvGd4EU6vCN0v4uQdm6OYEYtLJ1GO7Sj27SSN8TQwP9r5TOIzjHJ+NECnvKrbfTwMkKfZA0QHP47m3cvCI4EQ3vhdTGmajc0rR7MVfZSjygB/g24uFm8cW4ECvX1HtbknYpw2K5eAuX4On/xDxnOYUns7NZSQ0eRH4wCX2xhmk0HGBoLyp799AQosWm3PCkUTDwCV3o6tAZPHmZGd9uko5rEbCLDb6zvNIdvS1HbMYLIfREZrp7GP4jUCFC6YGB/GqLxojKHSnAEAosVGBg3/9ho1FkjKpoMgR3HMHtcYuwIgnIilWUBRF/JG8L+YoL9FeV7mY8tK9ko8o8r42elEYRzsBAv8G1LpFVkqQRcejebhzjNidEDWamleTpP6gWChnEBKE/SNgq0HlNoxxV5wOF0oM4NtUVmSZRTGTTMwOQd4kUnX1088rl+FPEo6BxbFGsAgXfgAS3kkvwqZIry+tLSgwI2CMyiOBkx5im4FftAHxKEpkoD9oL1prac6pWiC30TOrorirjWmGOdDlgmj02+TKiheBntTsqm4+tCkAWY+5pUQPkRTSRT3IcdFIIDsDwIHZE/EE5iNZURGQho9OL2YBQQREEPVIe6OFrxMTisZKxblpxJJqUE+zJ+jr4hT6t3Q78djSlM5pVNJQ/mwbnGNGUMmClkMvGgKWwDQgAWSLBogoWWEFI1Jgl7kcvicLKzeQ3c/EO5qUWbAYqEEVQAAA8dN16xq+zGwTRIMC6gmAAAgAElEQVR1zBkXX5Szohl5GqwYhgXLcPaU1CJgU0wUkauc3JsYh3xUUJY++UDlg/tPdg9lrwiukDnCfPBvykLlAAq4IS/VO3xepldvSHJKA6qaYPYY80bpdgNQWJqWG40olvNKbnpck1TDOcb1kgAyGGnqvePGOBMAiAIGCCXLYmAFFeeWZfgitnHpeSnDo+VqoS7mlRWxBO3qecqSicsgWTA4ZL6qpMVof94AiOT3jaoxCZYw5pJZGXT3QZ6kAugGhutVmL+nGnKvMI0xU2rfDKFO8ZotUljWmB8CubYWbmkjWCpghOCsAC0wLpmKxtZRAx9z0PujUP6i5JX1BJnxPkoGCFkAWWi3a54YON7O/fti7HQIN3tmbPbFOCgDpM1Y2Q78oCSUBT8QdxqfYILJtLPk007gx07klXaR34jPSQIsZILQz0MDXh5kbLf8bM4t95t+Gitf/yOpdESl9P3/DADo79QtBTN2geTXXpkgWzFA2tsjCMJjxXgUclVkghBw2i0/m/dff7x1BnjBwE6voB++TOhCWlpOqSKMHnoG9Aw8/Rl4mpgfPFo0QE/Dy45yCy53BP5xgR0Llo/6CD+NHiA6+PFozhp+F+9UXH80W9VHPeoMbGaEHPX4J3k8/Rw+yUdn73Pb7jjy3O7v9Eh3+ItNBSeh+fNRMUB28sbYmNWjAAmOE/zgfjpaWzNmCVAETENolN6/xFc7R9y3lGFKAR1kzGwMskH43IDxTXHUAooRQ2+U45AC2/s7RV+SGdgVAGFhix3s1WpTyR51nr4oy2ycLsLcFSCBu5wTJ7r579y9owyizSgIs4vei8IivTX4uF6OCZkb6eqqlMH8oH9FGRJIfrAmnnvrS4qxkEQBg8ECWSTaJbMo7DcGUPDnpiZK8OMAIwCeHZQXInMhTX+NFrwgYNidR+Fjfq0DP2qAXBa2BaEn6cFcPZ4+Nfc8oAcGgZDcvGbaW7HmJGRCnz7MyonlzKLoXbp5D8br2rIocStPk+UCEWLKJ7kU64GRBwDUgrCUc3hUSUMRwGDMz5YAEKz5X6hnwJyARfzqdFqCr2InEA6TBRr3PpmZvSspqMq0ZbDcDbfUMZ96A4AGkuSJ9Mm9+YSSZ6J5d3GppGSmAkGwQm6NY5lx+F880PZzQnqsZi4j9z54qMDXBLhETxgGZ00NyOkoVZAXm2StHvFbfbI8+ZmkAGxcuvyqot5PVcfFaI9JNg5GibespLE64P0xeeczSY7fUEBEGiwT5qHnXLcsFFfl1HP9iumxND2nASGYFRknaeyfDWYYNTBQyIjguoxcelby8SWwdHwAwDQfCs0LBIVTj1layCsBE0qqbYy6E+cCzUOIw8LQPNCyKoZICDrchUBBlsHqmC+UJWQES6Xphi9LVnodDel9/TUAUTNqKPqz+FwOeWHkjGQ9CfW429gjviEwlwAQ2W2gVF4AcHVzBjkexLkGg/dUQlz4gIstguEB0APuJgA28B+OQxQsnID7tMxiP2zRPiyrnQPBPquSyNJktSKQy3iYcfHQjj3GB7liVf7p42n55M6ylCAXxnDYzHIK8l5/+PZpyMQ9LI31GKf20KYeFQOE4EAul14HPygLxQI9C+M7Fff3mofNxf2t/EQOygAhY2Ur8GO/puA7ST5t9EMhK8bp9D7ks8E87RQaCMLPozMKBKnA74j+I20QZCdwZbtx28eFAIKS90KQ/UEAhOboAhkw27/694olspsc1nYMkPa2uf5+mCzbzXmr
57ltHnv6r2yXn63W05/bPgMsEgZ92vcfv7+yYAXqoWdAz8DJy0Ayp8lOHNXMNjM/djI8J5t3BY0wjT14nbnAuthKHuao5r3dOPl8XknxMoKBEK5rtKap7ZZ/1M8/bR4gOvjxqM8YfXw9A3oG9AyczAy0hafZALoxzFC+aLYMYtzO4OEx7c6jYoDs5o2xlbH3brtMsOE4wY7d5rfV63a0j1MK7DAskJT1jgTqQ+uG7+3ttNkgCgSRNyVivCDx2g1ZtP1KB0G2Ohgn6LldARDONdLRJfGVJdzWfBmGz0ho5EHhnfqfBkg70UUglYNXhBWV97ViGS8+skXIV6EzfrbpBBFJC7IMspZhuRzsE2XEjC5+htcXFBPYAKXKLUlAcqgV9cG3An4Lt2OQtEJxH9rKDAs7pQCE5OFLUoihcwryQwwNEAA7AxJYHmx7bm5O+YHwcb4Ms2oUTDyQy2Ks4pqsfH9RegfsYuuAV0VeY4VQpqvSXJPWALOgq6eqzKzHzp9eL/QTBGBhf+HqBwqQaBuE2wdOSWDmvmI+WIwhKRumoS2lNqe8P4owbG9kTbIKkoTD7pNcZglFsU74rMD7wwqXkzVfibnJONgzfZCDwjjMmjsuU4Xb8AUZktmVhAIbbN3DkkXaliCtVQEo4AxjfJNL+TnkE6tUBoOpWYcswnCVcmSKqYG51H0ZOY0ceMI9SiJr5dbnykCcj93IvQ8+LzUrGCFwuufrDDI70mDIMJj3iL9XASAEJYw1sCjA3lmG/0U7ZicmlPyZEwAVjxlBkcmFOcWySSykwZTIySn7S8hhTiZu3lLyY/kapHQAlBBcyqS1i8Foj2vdOB1HFwCND+OuSsAZlpt34M+ykFEgmAFAVW9nD0hqOLYwVC+16B2SkjvzywDespAHqyn/lli6JGetTgV+VNItmZKEdIDC7YaZbqaZB5sGxxXsG0ao0hQHzhvvKuAzsGZW3UtKaquShuE9mB2tVkQGnzsPSalesI5mxXDxomLH0ACVUS4AyMoDdLEWAQTu6a3WTt8juS/jfTa1kJP3rq6dkNgKGUNqrnhte5XIRzKdbQd9FAwQgh95eLW0mR9t8IOTOCrwI52d3NVMfSeGxlYJIehgWhyX7k8/+ILs1X7Bj/b4W0k+KWZCcRoIdHlbSTB2qWztAvJg5vwht17kx3iUGSMIYsKPpt3ysxvAQiZI/so7amMPgSCQxlLPwTR9M+Pkwcy29gDZ+Dr/fhxyVX4/32kAiTbkhz4tBwXHNu/Ds/KYXaft7/JSGezLqs4AeVaOvb6fT04GbsWTElu4L69fHoVH3+G8xvbL/Eigkeb73/u+fPzxhzsmjNcGyXRSTg9fkP/l3/6JDA6P7Ci1seNgB3gxk0oJvQoZHdEAroM077sDDHXoVZ42DxAd/Dj0KaEPoGdAz4CegSc2A41GUyYXM/IR6ojt5k/ujM9tlVfRzEt2yHHGo2aAVA259cI9QQ/6WdC8vNSChLsRTrxoHC+0NGWV3fJwEsEPSn/b634xNbZu4nW3OpUMVtEUOxB4Q9CHjA6yQOwmeOZivI0yWBtzRiCk1/gy/Rl0EGS3k+mYX99TVZbd9e/+4tfSaUzI4NnXpOE3wLsjpLRqycSIgFVGr4PE5B1VuK6sMSgoVcWPFYIdjFoTxelMWVwV+EOY+mQ43CX3P39X0ig0m+HBUAdjxFQCwMHq1oYIlYwSwAVJbn5C7oKh0dWjeUTEFsAmQIHa5a9JHCyHQAA+H5A5slXQyY/1rTawCKKQt5qxy2xsUUKn8WzAJQ2wABxpJ0zTAUb4HWCqgFWQv401PBI5NQgwBR4WeFRIY/8WZyQ3AY+PoFYUbwMwBAEKuF5p1TTTQk636OmQIvxSCia8UE5L1pGSxqzGbHH1gBEAZoaTJAZvA+wRXOiMXAbjJCMT+TkpZayQSrolH//yDiSzwDJBUX/x9k2VBUMPmC4wRvd6Q7JYM0i8FIMXSUQWphYA4mjeKBEwTAaHLsBMHiyLklk8obDEUex3YJvDL35b4gsfyuIddExz8/aiTBFAgrE5AYlpPEdpLEEOSbIg+GGpejWPE4A8ZJGgiiluACIELMj2cES1UmhhGkwdZ0V5pvwawAUZOp6RC+KtXZPk0m14lMD42X4G4AdYGrgYdbq0IlWtpLGKggBdnC4NGKnjHMoD7iGziMeYgI0bbCB6jmTJtqHpCqKHk0EYkSdzUwNKCMTw1aVqXErWCNhIYMzA8AoIGdgsbomlzOK25FCcrcsHv/61krBy9kFmDKbw5Ie4TCvwFBmSuSkNBCNTZMUAoADkkGyQEmqYC8zVHWCJjDotkrD1S2xqFr4g4+iUD4p3pAfSBfTA0SiWK2WP1LNRGN0W1FxZWD3uMBuNAHBccnE4JKWq5mXjsJqggemFr8rT6wGyDn6AlSCQwdsIfhzFMdkPs2G/RW6CH6Ef/D/ifG+N8YAJL/zJf5DEN/9YatERMe2GGmyzgxsln+LJu2ophwWeQDuYwe+lS+VhpsOgVuRfvabyTnDF4yHI2rPl+2EnIKr9GudwUBBkNwZIO1VbgSBbyXltk9odn+bxbzTaINFafgASHbUU246TeEpe3Kzz+5Tslr4begaeqgx48dvQ79H8AQ+7Y5uZHzsZnhP8+K//9S/llz/+gfSiseZUWPvNs3kO2apRrs6Ny69+dV/mJhfly2+8LP1Dw4+1K7S4BsDwQtuLZqLj9AZ6mjxAdPBj89muP9YzoGdAz8CzlYF6oyW3p/F74F00A6+ZoDMD/Z1eGYTk+nEDIHu5tj7IESNYwaL/UvNzFKFQE215YOd9Q1abdyGnDzCgCZl4oxkNutYTaWC+131W5uuIgzBa9rKNNuiz1PpUmZ0HqmeUcbyvofmqbh6D4AiZICXI6y/Jp5tf1h+fkAzsCQCJo0gcguHoAFgDBB2caY840G3PWFqKKTZGAs8zKB/FIjjZESx4x5c0QIRMAI/TI5UkCsJgd9tsdrn6+SfS0x2SzkBQ7CgiV41WsXa60bV/X+bTyxKCBAvhhXLALj0wRU9j3Fh1FgCA1mGfQIHfPliQCACPFvwZUnhrWwCwFH12FNW9YAOAFcEK9kBWRgc06apoZFTNE0JHUphMiasjgL8AdsRgku3zSKPTILWCXUK1KJC+ghS8CZzIWjfWVTAwRt3PacBPeVExGToBNFAWigBQX9Cv8pHP2hUjJZ4AQAGT1jC8OzzBkIQ8dviXoMCP+RuKGXF3+sXbf0quffwuWA8w0HbFIdsUhgG5Xay9TewLWDOTeYkvfiz2VljGxsIydW1Vmbo34EXSbIIx0ahJBGNOJq6DygdmSLVHbcvgHUAxM6CM0a0Bv1wc/Zcy8nxOPn/vr+CJQhRG82Kfvv1LCXadBftiENT7IIqTVviGxMTb6YSkCFBP3CYyk+Jq1aTlAvKJBj5PzyAKmTCqhzTXfH5BgR9kbryIMa0AisgIYgwMXVLnQHoJcjhWDbRSLyC6ANQUivelUIgqpghvZIF4wlGlhcxoRuAnk8rIahpa/wwQKiJdvcpcfHzprpwDWBVf0dhHWXT
/ejvD0ipYZSa9KL5SSa1CxgjBHJwVCkRJr2bUc15IDBBYGQSzowxgg0tDvEz8rvMyP/FzbCikvFkYKbBEaPbuhCxZCSyRm2vP//wnfyWtzFnIbcGLBB17gcBpJZllAIbXC2CtXHbipkk/nASDQHY7fPlSr1wY1gAktXMIr9OqOiFOShw1AySbnVO+DjYCslv4Uhx0v4k9UJoqV5hdZ35sZyze3sZeGSBt5sdO4Md+wZTN+0lGRRlAHaNazgMsDYNBsT0TYS8MkPY2+IPO6w3gMyon8SI0MXMxiUTObAt+cL29YDkaiKGBIJW+QbW5dTmsPTJB2nPc6X6zZ4e27Bc9TXYaY6fXeOzIBLFYTiuWjMZMOlo/mp22/zS8tvmioc28exr2Td8HPQNPSwYIPnz1ubD4HQdvAtmK+bGT7NVG8OP3vzwqv/mVL22bzp9/8KlcvT2OhqyAhDq31pHeduUjeIHfq2TKJ1e0Zioffoc7cQ1wnPE0eIDo4MdxnkH6tvUM6BnQM3ByMuBxWKULiips+mTYrGbphj+v1bKnMugj3ZFHxQDhpJVHIorwhabG8Fj3p0A/yEY5q5PI7NhP0lPNKRT8sMamn5neVq+QAUOGy1HsI8GWFeM1lc+wcUyxZ7gN+qgwNkpikV2jut31OJEZ2NM7f+DFEeGtNA0TGejU0ouDQeDDbwTwgOKzp3dEFckptdSWW6J/hq2DjAro70KaifdZMEQIhlST8FQoJuEnsig5PBa8QVlYL5VbkllBFz4K4rgKkPBKS6zVpKSDkIhak7gIhfwKqCjAH4ORBhAQHEPXlKsLhfSHdcDvzQHkMCQUO4SgSCw+rkCSGkASL3CAOGS26ANCcMGJrn+uX51pSDVUUFJeORgSEm7hugEAIwl4i3QZNBo/Da+bUfhNQAKqigsYykXRmLXeAOgz0ZCCD8YiCEp5MeYXiT5n1N/1XEHJYbmxvHU1KZWVOKSZouI9H1ASU2RmELtx9aRkoOBQkmMs3KcG42KfRjkf9WoPTIGHn7ukzMkbzqTMLCzKanIRnioACb7mU6bpoSblViqQqsrBMNYhb/zWH8lnv/gnWcoTEMBY3l41H4IO9G5xGsE0AZDASMJPJGGJiSfvETvACCNYNBMANHisawGwcmAhEx1du2DELnoGR5XfCudDQMjdDybNuEVMRRxfHM95MER6YQFDgCwbS0nBVpRRICGRV95S69BrpgL2h6lYUOwPj31A5uIzAKI6xIvtmyBZZTVG5Pb4fcgFNACGTOOi2gWmDYqt4OzkYWzuBrAWgBlSoQyfFWyfzJBU+q7KHSPgH0MxvCh3F3FOIB++YFnslS7ImAWVfFV8GcAYzOLvTk4rcIRm8YufTgHQG1cyYHT5oLwZ5+o+86bMWnwy0LLDSyQEposL55nIdGZG0pk8jiGMlTE/i+Vwsg9q4kcQNALr7XDDCOyL+tInyQDvqDxACDrlAdC1wQ+/d3hXn4i9pplARhmyO23wYyfmxMYx9wJacGzz0oRifvT8X/+7Wj01ck6KX/7WOvNjL+PstC8b/VAIfPjxOVCBVCBlqrbL0+aC887jA7gGCJ4rJxSzhDeOXwS7jBJZW81/JwbIxm1xOa5fiQwJZcBa/edUnpQnCEAQy+LEofPU3sZGOa/KmpzXTjJbO+Vk82v1Os1DnYqRpIMgm7Ozt8dW2xc/y/a2pr6UngE9A48rA7ZD/gQ6KPOjDX64HF+URijAE+/jX38qf/EPv1IMla+/NioTSc2X7nHlhdvh92q7UYaP2eBkOubfjE+6B4gOfjzOM1jflp4BPQN6Bk5uBmh0fmYwKP/jb158aJI21NH6OrQmwOOc/X6urQ86z2xLUzdBOXA92oDAUQADB53XUazH+a+Yril2RpsNwnHJbLFW4cGLmgEZL7I1CXjfU+D26CfCMdvboMSWA93PftOAYtoQdCk1NQWgfW9AX+GxZGBPAAiBjzSAjyV0+ncZLapwXYTclWJ1oFCdQ8G6Y2AMElZFdFjD12JhWskYeexh6Y0EUSweUMX0XDwnqEGyHq/CDfkiymR19vQpRgGL50WAIvGpuzC4zoBBodHm+0+9otgHUl1RQIcfvhYue1CKgSw67uOSsi2gGA+fDASBCgaBDP59ug/m2QUNElxYxrugFpd+y4hEMYaxF0bdkJAqOmvr65H5ETgflApksQjcXQqgyJafBgtAo1gRPIGVu3Q5elCA14rqCeuqDHr6lXRUMRtTxfZoj1cm83ExxDIKYGllbuMGfwmwEFSgCAjLeKAjSSXBdfrUkHgCYEkAIGFxHdpNikXDaAMGXn9LzgVfBGpLsMAl3cNd8LXwKMN4sjBMkBqjV8lC9RqAlgvwHEnJvAnzXkT3OxgROU9UurpMEkWnm7EF2S+oR11+6zcUcEPApQUgx5wFoAVanJsICwBNMmY8o5qJdyyOfQDoM56/Jj2FUcwsgu2kwaaISPdop5TnM7LID1l0xhdbWYk2RxVAYs4GlWzW6REwhyidhe3SMwTiZVIzujSpK/ew1OoV+eijj+HZApmtBXgRAGzh9ihFJjAfZ1CIrB9gWJ/TJLGleTGGz4mp0FLMJBsAIQIl4/AmCRnACkogv2sX3imDdvzKy6sKqCP4kQKDaC4Zk0a1LN04px3Y52RsUm1HzElZmCtKLelUgNT4EuSwgMkpI3jIvplrRenr61M+KTPwplmt3hBgY2KFKXoqDdkteKkkIYe20LwHZhBQnxMQZche3ZkFULaUgUG19k1gg7l0FwCR80NhvKcOWaU4on08CgbIowQ/yFQg+MGi9U6eGVulYzcGCF+3xLYHP+pdo4e2a9tKEswCiaq8WQOL0rI1CLJXBgjHr+C7YGN+KFdEsIjm6PS/2AoE2QsDZGNOydIgCEI5MMY6CLK2UFsmbCuwZatjs91zmmfHA7mqjcbu262zl+fb83IAJOWYu+VnL2M+S8vojTXP0tHW9/VZzcBnd+bk79+bkAtvfVm++z//W9lJ9iqbza7LXu0GfvwS4/6X72ngx1svnVW/1yaSDyRtH2e+q5UHVQkywI8znnQPEB38OM6zR9+2ngE9A3oGTlYGCDB0hd0SDX6xYcqA1447HiUDZLt9exLNzLfbFz5PUGIzkMPHVH9RcYTgR3se7W1yG5QaY6zCbF3FEYMu7W3q90eXgV0BEBYSx+cg57TmsbGa0uSN2ibVDTAH4ujwj5c+FDIz3F7NqJxFYxbyadtAXwmDJalAhv7QS2r2CzOQRQH4ER29iN59ACEAPyzNgnSCfWB/52VIZ81LenkaLfud0uzISMIKmribptpedOAbpeE1SigWFWcvvCpQgO+3R1QxmhGAJFEik5M0vDjI+OA6JoAfFjBQahVId6HOS8r5rCkJL4es9Fv7wRrQJIAcoJ+7nPAoQWduAYbktXpZjHPT4oNUlg/gjwRwa0XFCvDHajXKdEWT/ko5FuF/Ar29oga81Epxca40JKFhB2BP5CXt19gf1UoIxf808AVIZqViKm/sZKUsmMUCfxD4bND7wpxYhoTUtJKHYr7pP+J3GJEnjw
S8filBlimDgr2hALYIABHCJh54UYzhb2sO0lO4FbBuPpcWP8AVi6kEWSsNKJqbnVFMixxc0e0BFFXty9JRq8iKxSnW+SWx4Ph5jOhEdzhRMGfREsesNqGYNJKCSfoi6HSnIir32UJcAjmz1GH27TVr4w9EuqVe0j54eB6ceeGcAhsIRJiyGsuDIEgQsmFGnFu1ulcBMTSsJ5ARARZ09txFqTUcMjt9U5YA4nR19yhgqgkPGnptWAxWMHJWJVywAN0fBAKcxvgGdbwtLrdi7mQLVWXuzePLbZPRUgWjpa+/C+eayK1bd8Xe7QawUpHlYkqWYdjJ4PGowpOmL9QpFrMdjCGwOvIW7JPWIUgGTKo6L8llbUwCY5buiAy4llQeZqHpTACJ2+8MakCZGvgYIwPty/c+X5AffTL90CzevNArA1HfiQFADssAeZTgR5v5oYr7iL0yP9oJ36kY3wY/wn/2H5WsE4PMj8y/+veSfeFVIfhx2Nicm42SYPTmUGboACkIgmyWC9tLl8pD4Mem/JhMw2pcjt9sftELZK8MkHYOuDz9OuiFshkEUWwQxGFBEB6vLT07apQBPBq5FO6HxirR9ES3y89hj/3Ttv7xX7Y8bRnV90fPwMnKwGbwY7+yV9sxPxT48f9+TzE/CH4MRMDuBQBynFHIaU06nIPZvOul2SOb6pPsAaKDH4/stNAH1jOgZ0DPwBOZAQIM8yt51DJTUqk9qIS7oTJzbhBKKVCEOc7Yy7X1Uc9vM1hw1OM/a+MxnwSV1pk2TMAmmbFnLScnfX/39CubpuWBCHw4IJUSz8wrMKQT0kAMejv4gB10wly8DvAjMYHCvquh2An0Tbh567qSvgqf65YeD7r4UZUm+4DPedwdkrz+vlyDNJIHrAlHFxkKURTsK2obl06fUdtgQZvrFlnEBhODoEa24IVHSFheHXsVxWrIH0G+iMVyLZoynQJPozmt5JsqU0sKsLA7O2TKmpYEwIlLbq14ZU82xDXiFJ+9QwEPeRTYPb4ucUbL0gjAj6NqFtfZMYmNa14WpsCwdA0a1XIlzJGABwNYggoyUtQ9XjMQC6hrXfWxVhqMFs2oXcBssBRh7G2qoBA/AF8NDSXhmFWAGZSPMsfG1Thk2RAAoSk4JcBayJXAeD2Vw9xgYp7HY7Jtus+cU4CJAkYAdlBKirJSBEzMmZrEVm+p2+DYaTAyDIqZk4/VZOLmLek8bQKIkgHnBNiOkq3RLsRsXu0Yt8EhX6hHMg6HkhBjmPI41kS4AIKouTYhM4b7qisvqUIdoEJE/Q1hNDwL1gaYEF5LXdJIVgm3ykpMPvj+J2rdgV4URPvOK7YL9z8Q8Elw+Jx6jUFTSIJW3AfKl7WZPvT6qNlLQtcQnnutYE75hPArLr1alxAYSg4n9PkAVjXAQuK+lGtm7PuSYo2EIOe1VC3J3VWt8y8C2TPmmSyRHOb3eSIt3b2j0mcfFlvUrgAYlxfHCxtwQP4q7l4CwALGUKAoLZwvKTBi3EWAR5BGYwRce3qLqWUfR5TKoO5tMAGzWUxSLGueOo9j+3vZxmEYIDQkLxbxnoDnBw23nU4AWJYvyl/sZR5bLZPHOa/5NQD8cO7fs2ErBgiZD9VqRRyrc7IV+LH4znfWCuRbzWjvz2nMj+0lwcioWJd8AkhRAfhLZgJlmnYCbtozWGeWFKfVU5vzQ6oo5bUIgvD4MLYzRG+PuZd7zo3gUBsEaRvGt+XDDguCcA5tTxMyWR7IVXH+W8t57WXeG5fhOUAQpA0SHWV+9juXJ2V5nQHypBwpfZ56Bvafgc3gx07MD3p+/M3f/H/K8HxPzI9N4Mf+Z/f0rvGkMkAWC8vyIZQJAi8Oy3e/+105cxbXRJB91UPPgJ4BPQN6Bp7dDFTrTQV+/Pk/3pRKtb6eCDJCvvvNi8cOgBwHA+TZPRse3Z5vBSpt9dyjm4E+8n4ysKfqLJkH3UEAGga7NANgMUDWzAkfBBbm28ECP58XSBbR7yODQnd2+Z4GfkCayd1wKzaBBwXmubllCeO5UqygmA1mGGc7fJrEEU3EKWnFmAc40YvxGLYArfQAACAASURBVCGzlyQjJW1FkIHARt1Tk/fe+4l0RvvwXHLdg4TLj5u1om4EBfo86vl1AAo0Y3T5C8KatDPfJV/qdMjVlevKg2MOkjbx+9NiAiOBfhiUUmqguGXGwrBOgr9FQK4uJaQ+Ma72m9JMLMRTEozSYCzIt/1HOG8XClqFVkj6zfQpgRk8iAOKPYFwjkDjd9kOzw6NMkWvEYYHrA9mNI/tJ1IFGQGo1N8BHxHcpgplBVpQSopgQjELQAbbruDCL28Een1H63jmOGnIajFq9qwCBQgq2KsWtU8MmoHTSJxABwGWubk5JcM12hVR0lqC5wmEmLNuic+lAR44FIvEG4GrCKpOjVMXVGGfDI8FPO/zA/VcNmJMzXg8m4eHR/ewLINBQ8UvAgIlIO02gBGGTFZ5x3DJchYsn1xJdeHNzC+IDyBE75WLeM4pi9gfenUQbCATh8ckBxk2AQjBYK79cbBheG4CbCO7JDSimZ6rBRBGRxLzakEmbFmGXnhF+sPd6vnc7IRMTo2rvJE10+/vASgDGbEwTox5gHoAPu7ivIwvQrKnWJH04oz4uwcAoGlAVe5uSgFujIHne2V4+IL6uwZPFTbxEWyRJEwtAbh4MP8SmDj1+oMvXbXwMYTbYZHXLvZI0PcwIDACKTe+dlLioAwQgh/0sKDXxFGDH5uL+/Rt2CswsDGvWwEJdciR+T97Xzzf/89fYH4cFfixmfmxlc/HVr4XLPbvZV/X80OJK8hpcZ3NXhncd4JRjwIEYY43giB8TBbIZhDkoOd4m53S9uw4armq9viPMj8H3feTup7OADmpR0afl56Bg2egUK7I3OLyo5O9esbBD/7WpScgwwi/wqbJ/tB9GY/bUcKxSJfw+wRPqHsY2W+853KlIuR1My2hUf0ymqM2L9N+vJCqQHZXk/raaUwun0QTHH97Z2pZyWGOO0UBDUxzxQWZu7Egz7355o7gR70BmWU0mZFhw6aqnaICWTLebGiw4227YPEqn9fy6XZ7VLOEHnoG9AzoGdAzcHIywO+ndL7ycAMojNCrteOvzejfGSfnPNFn8uxkYFcAhBIjnb12FJEFoEZe/DW7+N12ydYBEMQ1hgDlleqNOcln7QqMsHRoMkFtM20W231mj0RdIYmlUbBH0bwXEkkWf155QdDQnGGd0h6HbM8pnwmal6fWPGSuCaSm1n5XEkjwOm2QNMrIT7//noyeOi3nz5xBsTu+Dk64wMBQY6Kr2RApSK+1UzpDIRmJnoOfw125dvMjef78Obl04aJiFlDuiAyUdlCCKwyfiBp8IVr47Rvt6JLf+91vy8TkhGIhMPqrIeUPwYuDbKsgNfz4v9BzTvKNiEwn5qQXkl30ySBgMxMOSxkF9BH4ayjZqE74g9QKMgtZqXIDGQLLhowEghUEOphTSSzK7Ipm9A6kAgyblsqX5vcxoubccX5AbHF0vaNoTzCJ4AzHoGeKy9aJbiitA6oGI
3AvmAokY2SxfAEXQQS2CHQ0AKTkouBpxFPKj4OxDGCF0QAYsgSTeYJSkSWvLMcacv5sBl4eL2twB44lpb9qS3Hxdo3BAwOG5TCTt2YhaVa344InppZbhBSUF7kwlQGUQcKqUYUHCIyXL/W2AJSsHVh4vNBrplqtqbll4SXDY2MwAiABWBQESNYGmnjfcLYgoTYmPNvIYKwB7KLpeTuY50TjhqSmYvKzNY8Q13CnkuzydkA2zXVFBjr71OK1O4tSvhGX5Bqo5/PbpVQwgA2EcxrsJUt3QMrT0PeDybwDnh+5qradxdmmLM7OynNXXlfHtYbt5FspxSLpgS8OmSiGzK5vs/U5P8o/HPiyv3I2Is+NaEBOe1tmGITxtZMSB2GAbAY/joJZ0M7HVuDH5uL+XnO3mQHCeZ8E8GPj/PljbLPvBdkc20k+bQQ/bJA13CydtTk3VqsGghTNGlOHrx8FU4csika3xgSpdQMNhSF6GwQxzN5ShumlsPZ+3zynvT5mbgiC4NNfrbKTp8lex9y8XBsE2ZifozyfN2/vSX2sM0Ce1COnz/tZzIDNurcmi89vTMqN8dS658duslc682P3s4mflZY1Y/XF2zflx/LfxI/rkVJta3Ahh4akpeWUfHSjpQAOB4zZtwq+dn1ySebjCXn3V7dlYUW77tpu2bn5ogI2vv/uJ9uOyXXvji/IvVhCPnRq8rs+i+btuNW4P7r7kVxFY9hv/+43dgQ/uG4mnZLvf+/7MjwyIl/+8ptbDbf+3IcffiTXr1+X3/gXX5XTY5oawVYrNNG98MMf/gBSuTb52m/+hni92891q/X15/QM6BnQM6Bn4NFlwGwyyAAaPb/20qBQBaMdbAYN+3YGwh/drB6MrDNAHkeW9W3oGXg4A7tWPU1mi3zr0qDch2/DralFsfaGZNl+TZqfhxQjgOCGwe6Ulm9E3MrzIY+CL3w6YAjeCyMHH+SGGtWUOII2WQALowLPBBbwfUGPuvnBBomjWE85JRkMSrOcgYm4Tc7ZehSzgnEq2q8K2zcWNOCBAMgCjK4ZgyEf5I9Kspq+rySuzENu6Tb0wrDiUzGAidGJbiZr2qWYDiNXtB+8lI2qDcDDAz4PpXJLAQlkYfhhhk4GBMGDubmy1NxmSWfw499elnmwFih/xIJ82H8KHUQWccO4XflDQCpGACTYTQAtEM4qZJgg8SUgy5Al0gMJpQBN2QF+MCjhxaAZO8EfynutXJtXwAUL+k6Y4srcPVkC+NBmm0QAisAYBJJZMFSHVJfUTGLpcasCuz8G2gLAEYioKJCH4YaXSH5xUlYxhjIeR+TnbqKbyitxGKVTUkstB1ZMV7hH+VlUqpqOlwvyTd60Bo4QiOgCyBPPhNT2vM68RMK9Yk63JEvZKBQdHWmnhGEKT/CDuVAFf/yrBZ2SWgLwAWAgQLksI4692wYwLSCJboAVizYFXoRwzBmlmEfoMWPpBoBSxLlFZImG5ZC0snkG1LlGY6FURGPS+Ac01hC9X3i+taMZaIophfMJQbP6JeBalMUywuS9uJCXogXSZTgeduOgXO57AZ4kq/LZ/MT6+jxXCATxPrl0W6IGFHVt/fJ58apU2O22OCc93f0SOTWoWCkpAILX738ur19+Sxz9p6Qwex/nlkVWcFw6pFd50hynpvP6juGPQqkmhU2SVzQ/P0kAyH4ZII8F/ACzYS/F/Y253urvjQyQar0g3VswP+L/7j9J+vk3xQ62BIv6+/XG2Ljdg0iCcXuNhlHJO1kspx+SfNrK9SKbnVOSVnvND8dnkZ+gB4PrMhdkhhiNu34lbZVW9ZzGYNFAkMVAl7bcGgjS9lTJ/c6fSho+SQeNNkvmUXt2bJefo5RzO2gOTsp6ep/tSTkS+jz0DOycAXb0kx2wU3CZ2/dXZX6pLK//1m/sanjelr366Q+/tyfZqx/84McPeX5sN5faCegI3W5uB32en5VkMbzxxhvKa5DXMvRedG+DSVmCQXn9tZfEB3/CLJqxmryewDWHuq7YcI9LDumwn5bXOxPiRgNZAZK7Wy3H9dLmJkCtHvWbORvs23ZMLmsd7pOvfq1D7Gg2SoxCLWAbAIb5GB24Iv8C1wxvv/nWrrJX9Rp+/6qGtwem81vltF2QYp52i0a9piSHmQs99AzoGdAzoGfgZGXABCnEM/1BGYp+EZx2nQD1C50BcrLOF302z0YG9lRtIviRKZTgFRGUMIpiAchPLYeT0js2KtHQRVRVMxIrz0orlZCVZl46jCzChyAb1JRVb1WaMyjAQ8LIF4GR+JBDVkBvb7XAJOnqhB48AITsXRTbUJyeuC3ZqlEuXLok03zO2SV5E+jKVqfE794T+7RJvMMuCYOxQC8KSi91va79gu8NR2R5vgxL7JostuZRqB+URiUu9yYgrYRufhbyiyWtwM+ifyhTkVSa3ISS9NLEBEHwg4yOQjojfmMDwIdBSS+tmmC+bgWosxpX0l0B/5gybWckIb3lARDiMILp0TkvN+YfSFG1Ja/oO9GzBuL47FFJFTOK5WD1WMQfcYnDE5LQC17lmUJD8ImPP5Wr1z5QtPK+XqcEXWeR90ExljxiASgTo39FaUFqYOPQl6Xu6wCrxgLzcEyIzBEcj0Kajh4AWSCfVXOXFbOiDiN3AYPjlctnJF1rSgNMDMphCczHKS/mgwQXvTEIQnn93RIxaUyBClByD0ASRqqvKvdz92Sw3i/NkkMGA/BsaS1JFp1VvJGhQgZLFWJelrpHerrOAIxoinO0E14ZZYyvyX6ZGxpgoTxEcKM8mtG+Ko6ABmoEbJDb8o8qajl9RJxGp6Tji4p91G+/KClrXRr5GMAgmsab4WNRVH4kg4NuSZbAUMKZQAZS3Qtj9rlBcQKwCLa6JLF8X1xJDVhbDSRl/JNfisGN8QF20EeFgBYvkFygx3PvBy68oPab8l1ffv2SZJI5yJBFZAkgEsPpDkvV3gTTaFluxO5Jv6dT+gfPS7OWUedGHcDZcrx4IiSwcsWq/OTTGfnkzrKUKhoY57CZ5VRvQP7w7dPiAzh1EmI/DJAyPWsKs+uyV0fZKU9mw8bi/layUXvJF+Wnaui0tEAaiow6FXjc8YubEvlbjaXApxbf+ZaoAj3Aj3ah+3DgRxmFjsSB/VAI1rDQ7yz1AjAcV94n0bk1Rtrajk8T6AUeS/Bjv/nhPvJ4MQiCtI3X14Y+0F07Xy6w0CgfpmIDCLKIh51/9HWZ9W7f0bnXDTsAVD9Kz46t8rPfHO91X57E5XQGyJN41PQ5P0sZYOMHpUbJ6tjNZHx2UZNu3Q384Ps+l80qzw+CH994uU9+8ytfkv0Ynm93DGLzcfxeg8/eUxgEQC7h2uoM2PI1AAGHDTJKjmKc7eZBsIJhXmOutLe3+Z7LcFkHfrPv5vkRAvP73/ybf71rQxILUi+99KKch0qA2635+W03T/qC/d7v/556mRJYeugZ0DOgZ0DPwMnKQKPZhP9rA819Wu2Os7NZIOXeaOF743jnqjNAjjf/+tafzQzsCoCwuyVTcErdD0NcgB+dtj7xWFxijrQAeIj0
jnhlHFJNlekpCdkM6BiCPFXEIXNTYCWI1i88c+uGyu6LY2+juB4XSwEeG0GDJGKQgUJr0kALxdhSUmIAPzr9NokO9YstVZa7szMyDMYJ6BVy+dSI5LswXfh2jPRYJIVC9OSdBRk1NaR5ql+qqzUl1dUX9shnM0mJnPbL+OfTMgNQ5uJwv/gtRqG/CMGK25MopAPguEB/DQTBgEDvELrgy3Ln01uquH0aXcLslHruuZfAakBnGszc+XOcMl9mU1GNQ7AgDakjD9gJ9H0IlAGkgO1RW4wrE3eGkmpywcA9m1aAwcYuIXphzF9PgwWyKD4wHowODRxYgfxU3eSVS1dGFQOkkZqU2x/mJDR8RjFPcgAaCiWL9J8ahel2XOqTM/Kl5y5LHKboWWpcAdggWwJ+3JJwtaQJ8INsGvqVEAhiMB8J0inWgsAMsYgWACDYt68/zzElG1OSZwRGOhp+qU8vSeY8QSOH5KbHFYuE41GuywRzc0sBoBY8YhiRpgsgVk75YCzNTIA5gi6yKBkivNi1KUN2gjBNAF08XfzWiGINNQGIRTo1pkeyBRAMAAlBrnwZmsCmpJhBfcmCFeSATBVNXQajF1E0NstyaU55k7SDxuyBEcwVu1EuL4iliflhHykn5m1aIc01Lz4TjO7BPiIIZ+0LShFSasmZVcX2YVCCK9LhkgH4mGRQ7SU7xHv+tDi7hsSGL04CJsuxotTAcHHbSjIbA8gC5gqzWMD7h5Jvj/JCcX1nd/mDX/5TCzl576rmscPFrThuDL7m22X9x/XyXhkgG8EPn+/MgQ2pCVA0wc4i+6ANUGxkThykuN/OFUGURHJZJidvyPlzV8B68CpGhffzf5Chv/2BkmhibAV+7DXf7flvZAfQwBxUpEMDQxx7ZvoOPt/s4gsLgIN5CNltiFZcbM7T+wY/2iMYjUZ8zmqfw23jb4JDAiDyMEEGCwGKa5ffkMLcjHwdgzHXbSbIyh9Dhi/83DrQdJBtEWzhOUPJL0Z7/kch57VxPhvz0waJDirBdpD9PKnr6AyQk3pk9HnpGdAyEAgE5Dvf+X25e+8evmMhE2t0qd9gvGe0/+a9H756/9OL35Bvf/tb0tWl+bVtlUeCH3/5l3+xDn58++tf2RL84Lq/vDMne2V+tLe1l67/reb1JDy3m5/Fk7APh5kjARIPJKr28t1Bj5DdfEI4F4IluuzVYY6Kvq6eAT0DegYeXQYIetyfT8s/fTCDZtkH4H9vp1veebFfhruPt/qhM0Ae3bHXR9YzsF0GdgVA2N3SjQ6uRUgetVCYWg7MAfgoS2txAnqvn8kC5JzC1i6wOS7CBC8vqVuL4gLDw+qDVBOW6y91S/PcBWmmyaQACyRwUYJdNnFkCpKo3JaO7k45daZPXagEgm7pvQhDdLwWS9sVa0HmEzLkC4g1Mko7DUQCoEkQy4pcQuHbDp8Fsk5YGA8qsETk+YEgWAAiL3/pjAJOYuPwEoFpdXTUioK0U/qDGg2ORfrBYAeM2T2KacDufjImTl3oQtczC/g1KQH8YNDPIZOEKToACBbGm5BRKgCIcENeKnf/A8Wu0OSb4AMB2asAkA6CH5R/ouSVBfR/slJSqYwyATejcz0GoIPMA2s6IqUUOqAjGjLdh+J8pxeG5B4YfIOVQDPwHPwwEpN31PIswHcN9kkLYIQFTBT3+YC4T50XQwHm42C5UBKKwIQWbjAr8OEOyTDOz1WBkSH2kaCPCQAJvSqywn2EJwhM0gmcpAxAC4iNAGSgTNn9qYryZOmElJg5ZBIbvDqyn63ANBwVUQtuCFunFX4aL0g1v6KAIjJQrEGXAo8oi0VQQoEwIHgQ8HA6A+IF+BEHE6fkr0ogBVCBXjFBsyzcGlcMmGjPKeVxQuCFrBsIlUndaYKpfVzbNUiBOfvG1N9eyI4R5KjXkVuAJc+ZcbJwIsA+VsszYN34JAiD+xpYKfbetS+7OM65FsZkB0Anjjf2d3nimmIduUNuCQZ6JTU/JTGYpjsb/ZKC2TqDTCGyZ7wujTnE/aP3Td6JCWCMYB98Y/JgwaDwzfPl7DmCM9toDWh78lj+b0axuQeMo4vD2vukvdHeDq/wtZMSmxkgLMIXi5Asg9Se2WzF+9+A90RKMRIYhwE/uD7ZGdkcWE8eSPGZcM7Cl2Me77d6axaF/d4DF/c5dhNdJ2l89n380X+X3t7TkNwryXvv/pX8x7//hy+AH9kX3xZLmyGi9mz3IMCSyYJpBKCtI9yltsfIJKfF54kqM/jDsGIIDF279gsJhqLyevdvg1z2nho/NXJO7r2C954hcuD8cO6cL3NvNjvUXAkiZHIxseDz1++tHAqgUHlA7j+Bqarr7X8tb8p/UTl3TN+XiR8Piuurt+T06YODIBtz7/X0gwXmkqXsIsD7koTRCEAJsa1M71UC9xA879vHNhTUmDLx5F2A/1Xp7xv9gsn8HoZ8qhbRGSBP1eHUd+YpzAALyG/Ca+G1119b3zs2GfCzbavga1YwFbYqULeZH3sBPwqlyjr4we289dJZGYjs3p0f7YU3XN/AVlPTn3tKMrDVufWU7Jq+G3oG9AzoGdAzsCkDrPHMLGXk/RvzkMun8ovW/NnfiUbj0fCxAyA6A0Q/ZfUMPP4M7AqA1Goo6iI8KDY3cWsBRW2HA14dqwAeegBO9ANBbSxZ5KYPxtzdBuUXQsDChboqQQ7BrZmIi9sfRCd+QW5OTUpgQAMi7iq2iMjwxXPicxVlevG2Yoa8cg5SU5msAlP4PGNlMSsZAB5h+HhMAWhxpLMSfesFFJ9gSA65rfUAmMGwhuHVUcbN7pUOtOt32FB+D6HL31EEeBFGMQm+HhMzEoDhRBysBBs+EAl+OH0+KWYyMtIJ82uzW1h+SiUDYrBBNuk2HsQWxWPKosiFojhvjFZUqml4VkBSyxjLS9OJoj5AGhNkqrL5imIFSKWmjMehpwW/jrAqpmdhkk6mB7KlgJI0vDX8kI7icoqVAZknwxpzg8CHpVCUFPYfliAofDbBWIFhIUwA2wbpHJMm5i101D0ffkFqDciF2ZflltyAtFMQuAAGBUDBIn4DiaYfBsGapr+pwABK2tBjZQEyUk4DpL+KcYBNZyUwNqQADkZ66Y7yEylmof0LBkg+bwboMaUkxCiXNR+/LpXrH4v1xVfEnAAQgHUs2bJkur0yaItClgv+IM2kJIozmA22DZDD0oDBsMkqsZZ2jhFoMcZCYsKxI6DD/XJDpkscefHnkMuBEWliDjQfXASARZmqiHNEgS4Q5BIbZc7AvCHQFQ4ald9I8Oygmn8RBXRL1Sv59JIMQeor3XRLvp6VEDr0BYSkkNkri5MAP2BqfmZQW4cAWbrUlA6yJnAIK+6qmKI2aa3mxADQqgwflwLOCYJC7kvwh+noWDdAPwkeID43ZLwu9cqFYQ20UolAeHGe8rWTEpsZICzCZ7IZNT035snPjw/h2cJOjjcGR7c1597P/hCoYrConQOA+E/8TPDY5RvhKNgPOMYHrLaS4cBgUYdBJshP/vuP5P1mUf4RIEJp8JSSvcpfeUdM/NA
7QFggM7Ix7sD7qDSRkQsXKN/Q80B26wBjcxUyo9zwRSLI8vdXV+X/DmuGqJHb9+Wlnt+Qnp6DnTttcIC55z44ndQHb0ocMnkz5gSM2BNgXm3fCbyX3bHA24nzz5y5Ip9Bnu/5H/6f8r+OOGX+xk9kxGaV/v7RQx3fdu4pb0bZx0Q9CVPYGbnYKsmr+CxZlzzby2Q3LdM+d5gf/k1mye3JBZnAd88fOH3wIXIeCmDZakq2HbTWt1r+OJ872LvlOGesb1vPwLOXAbIOjiIOwvxI50ryrbdf2BP40Z4jmcR66BnQM6BnQM+AngE9A09HBjwONA7Dl9ZhfaB31R1yQQXj+L/vdQbI03GO6XvxZGVgT+/8xSZMaistJXG1Cj8QI0ydO051gnFQkulfwtjcW5Brv74nNr9LgQbTqFV2+E3SN9QrjZmWuEpBmOMlJWf3iDF7R5YhicUSmqccoU+4ks2ifUU0qnW50iOEUYZk0s3pFXnnj39LyW5xLEE9zIrtlmvLqqM/NAbQBF4YFp9T7GW3lO15qbWSYIh4lExWI+sG+AEZIgA1oVIdBtAVpUXc7OkBywJd+vW8rIAKUTOUZeS5QfiVZCBVRUDGp0AQPwCbn/7iPcVooMxTzQc2QwN6/ijKd59DF66rC/7nkITCtsZOnwbV/w6K+QnJLt2X2YZTLsg55UUBHgJQA7BocjNq3whyVKwGgBRpidr9YJ4MSg3dcoVqHqaDThRf4TUCUGZymeyPOQWQUEvZi3nQa4PhQT4tKOAn8Fo5CymnkZdkuKdPSWFFBYwFsDu4r8ux6zKzEJMo6pQFIFJx2zyYIIQdAjIw1g12hl+xHhgLkGvylUri6wmie7qu/qbdPIENhmJvnI6Iac2gnc/RVJ5BgIIGiL2Y9/DwmCTB+KBMGNflejUDjhOWS6XvQk5qTHoCQ9IN8OTOxHXF8OhDMbsZR+7xr/PiGYlPFuTG+7+SC6MBxTZphcAmwhiyhFxASqvHC6ks1JdT6PImsybSAaBtTTmHfir1DpukE0lapwvUslBkhTPJosaMISOlPA29ZwJFKKjXcVzJzOkAOGYGELOIvDLPNKZneEYuKIktmtMTQKKhPNrVpcc6CrNzgFHSqeTFYpCYslXviLvXIqGLb4l7ICqOigbGqYGOMWgE1h12SQTn18Ywmwwo1B4NA8Rsc+KzQgMrD7qrmxkglBkKhzowR+3jajWxIuNreuH13gd01oNuz4Y588aCPNkft9OaRFgqV1aMjcN08nPMLvj/vP32/6Cmd/Xq+zI1eR+MloKcxmfLN8Mw+wS4cAEyCgcFWTi/dpAxkABjbNEEJl0pJ11gt5DVctBg7l948R3FxFiJL8hnn/5CPv/sIzWczz8OZt4lOXv2hQOzEVjYD/g1QK6d/xkz3IKqdbmfj0so2HlgEIH5DIe71fxdYM79XeUn8h8APN392aTKfxbg7huv/66cO/fSgYAEzrede/6dwYdMYq27iPfFcPZQ4BzH3JgbAnOlNV30O9lV9Z44Kimsg557Bz2vjmK9A2KSR7FpfQw9A3oGHmMGsnuUvdrM/Ngv+PEYd0nflJ4BPQN6BvQM6BnQM/CIM2A1wwQdMud/+jsXpbKhTxo9cFsaoz/i6XxheJ0B8oWU6E/oGXjkGdgTALIQTykAIQOJKR/ADzIyaIpeB4OAhd6NMYWCc7vIX0jb5PzQsAI/uDzZHHfR3J8xmOVFD+R4AjZZgCwTBHzVEEUwAmzLdgW0MBLYFmWxGLlaQVoOMAgwjg/LV/HaMFglhoBdVvNpCeFDbSU2pZY1DmgSRx1GrKtqg6NKqmsOJuv0Ibl4NixX63fhR9GCYTvo7vDDTUJqS20nNi+eaC/2EfJb6OhNp5MoctVR8J4RK/xJGgsluXz2qzKdXFHgRyAYlLySN5pV6w/0+GRmMSbus6cAfcADhBSOBAuqNnGFMUnI1OSnltSyoRG/DHSPrTMVImA/xAEWcDmnt1NKKOK/6LXIJOSdOLonALkZmF8MjYyAPdMtH6AgGUtmpR9FejeYFkMAEBLYz4n3P1dSWc+9+paS9qIpOb05WKAfvvgC5H0WZBbsjIKtKJFlg5qjMWVU0ldkg4BKA4SmDnkoK1gTJYl09SrPjGx2WpqBpiyk02C1QA5ryK28S4owdCe7pYVCbldXFIXAtEQiEdy+KguLWl76nB61neRcVRLzS2Je+UDsMBikXJXaDoENxBTYiZQQY6QlLxGYl/vDNsU26bI04MGSVD4kgR6/Eu6ir4sbz5tXb0urWZZUl0OBOe39qZUgl4UxC2mLDIDJ4Xe5xNMIygIKiBJ1ywgAGLI6aEqfANuDYEgOEr4qNwAAIABJREFU4A5lxgg6JZduC7sIL0MyLDo6ptgtOZjGk+VBwCdjqUsT44wv3RW7iTAezm+AbOW5Fentb0qqtCIm38noVSZj4ubUqkzHNH8aNVlEFyThXhgLi8t+OJkuslwoC1arJcHC0UzW29vYz/1mBgi76NtFfAIULP6Wqg38kDl8kZzz2ihTVC7Dl2clK6PdYIkhX9dWF+Utf+hQUkweSNl5vQGAo1cVgMDie71RlwQYcT+49YEMTt08sBTTxrlTVoTgjQOfR1/BOXkPQAjlpQ5TJGfuh4fGpARpvZ+//zcyhbly7gzO/7PPfi5feu235NTocwcGEdrnBo/tBECboNshfaBhzQFEyPiTkKLT3lf7OYfay3Jd3qam76q5fjY+sT5/7sv16+/LCJg4B81RO/9KDiuTVADFxeGIOodWkR8e+8PExvHbx/blM11q/MMe243z4rlTrZbRLECPq2N2BNxjwk7Gp+rWk+UFTRPnhBEg1nbdXXXoEtNjTY8nLwMmsLJ2M1x+8vbqZM74IODHQZgfJ3Pv9VnpGdAzoGdAz4CeAT0DB80Af4NHIcm+ufmTv88NeO24Y7trhOOel759PQNPcwZ2BUB4oXduqBvSUybF1iC4sLImL3XK3Sfh1y7L8g1NU8/hcMvbbw5La25Q+X3QhDsPpkM2qPloZAB6wMFBxvwDMgUZK1ljejSBPZAdogliYRtghBDo6AjBfPq8XRmvExzJsZaE572RkLo5jShS4vlVlMoTlrLyGSE7hWbtDHqWcBx/LwrcGZO8j67xbOumvPOHV2Q01SFGax8kuYpiSA/Iqf4+7Ti/cl7dk1ViQLGfnIeeUgDSWecAigxJJQ3tI8TY8CC8R+5LuhCSqCMKlkEIIMstqSxXwWYBM+VSv/iaFQXWfPZBSUwooLt7IbkEIIl+Ib1DXfLy2TPI6zK4ETbpge6wv6tTKkszYlmeE2OrIhVfVlIzFcUEYcyuxOU2QIvL8NsIwAslUF6WMszca+heb0GebPLOZ4qp0hnSPB5mAdpcvXFdHMWyjMG0m+BHV3cXuqE1I0oW7Ql6NE1NWV1aVMBDF5gfjKUF+ApMFRRIQ9ADLittT3uAYGacC2CH4L7lsokzZZE+zJ9MDDJOSk0Y3ZsvQrLFgQ7mDiVJtYrnLR2QhYJcTquIAhsK5NN37ylwpSd8WoFp6XQG+12WKCTVsjB373
oeth4zDYAfFjn90qvK2wSqRGKH9ldlzWS+3pjDXBuQIyNwQtDKIe6GW3l6qPMp0i3Z5VXIe9HIvFPcBg8YHBWcI4NS9BolPbGkQBSP264kw+p++E1M0Ogc5xdApM7QJTkPkIQAUMpal04Ys6fKcak5/MogkWb1DQBklu4q8hqRcmNF6sZRHNEaiq7TQkNNiMHJy6+8rPJ6nJEv1eSDm8vyo0+mH5rGmxd65XS/79AACIvlHTCqz2RvqyJ/CP46B4nNDJCNY6TSCZlfBUDRAOsIx3x+xXfoInl7fBax7+fmFYBwxhvGMa7IR/FlnNcJ5a+xEWzY734RQLgOLw0W3eljEgXIWgLYkgHA+sknP5Xnn/+K9PUOHpgFwrnTy4TMA4IHvf1hmZtNKxZF4JAADtkBZN188MG7ar4hfD4PhwMKTLh9+2O1X0ODZw4FEjGfy/ElBTqNWM0wXLfLvRn4MgFU8IGpdxgpKRb3b9z4lUwj94wrY2NyFyA19+Xzzz+UN978PentOThLhrmvgPVE8CbkccopNzydynX1uAuA6kHBlY3nWB4gP49tCJ9jHJ9/H8Wx5TaovMZzZ2VlQfyhXjGfAL+ijfu+3d8nmQGSSiXxvbKEhoAuvF8eeC4RGGFTQiqVkuXlZVlaXELu6+o7klKO+v3Jz4PT4VKNLwMD/UKTb6cLvzeOiEG53bn+rD6/V/CD+WkbnvNvnfnxrJ4x+n7rGdAzoGdAz4CegQcZ4O/uWLIgd6aTUKF/4D/mdtrl3GBAQmtNt8eVM50BclyZ17f7LGdgVwCECCnBBnp6WAFgMOi34UitKOCDslc2PwvPWnS0emQ8mJJW2gDpo7gYuo0Qk3JI1komRF6qq+h4xHgdGCppbEqwmZGVZXTpS1wmwBBhtFowADeQiQBjXEhfKfADDJD4zRmZn9WYJCOXz6nnCYKEagkFgOT6YKgOIIPrKHAkAjkYmIuTIULg5rnvfAW+FPfV6wIzaCckpYqQRIna+1H0WVHbIHjC17PxhLYcJ9SL2WFuBHiqGY1l0jd0Tvy1PjW2tRYHyQCADTxGpH9NHx/67C2wKhhvXHpBAUErQJoJgDzf0yXhxoPisHHIK7amC74ay8o8faGKgkkV6BFSQDGhXjVmW+YmpEAW+qsMesckMpIBUAImzdUPcFE+JEOQcILThdouJbzclnNKour88BDAo5QsovjN52luHi57ZHURufdrOvuv9l5QjIY0TMZPQbZnOnNPWuEBGFIm1XgMvxUbVn/wmKF4tjojTZisx40FMfvqUr27IgvWhDSnYPyOomCgd0ixQRh1D5F2mNif6xY3xhmEDFURsjFkVOSXTOiWdiq9/twSnq9llR9K15BH/PD6cFofFChZZLdUNbaC3fUVWR6YUyAHXDnW2R8++Hv0gD3ihpTYHbmqvDgKMK+fnLwrfnSE1wI4NjzdIJPVBcEwvwd8k3nkGGkngEQpMfqHMHpQyM9aPQBQAH4kMM7kMrrfwToK+SU6EBYPCrRBL2hECDO6s70VTaKM8l9pyJCRgXNSgoVZMifaPwJsFpMqOh9FUOs7CgZQHtJgxWLmwMX8zQyQ9txYyGbRl/JIjrBm9r0AmarpUuzQRXJug0Xs+eW8OMGEcYLhVK9bAYYkDy3FxLEJIHz88c9U0Z3gx5987bzcGE/J377/S7l35yO5f+8zSGX1HarQvwT2GhkIrgCYWHh/EEi4AabAWX/2UCwKMjNu3fxgHUD4MqT/3nj1lCz9RUVi2Cb3izJTZIocVEqJx3aGn3mICNgrnP+KJa9AhHDxcFJSieSyApk4Vx9kx77+2qj0jofk+7/8SO7c/kjt22Fzz+NL6bGXI5AesztkxOGR6yiC8/nDgCvMRxuY498DVr8av7fTLffnU4c+tirhCHp9JZOLcv5cD77ntO+w9msn9f74+7a2z0y5DG8xfN/U1yTLuCQfT05Oys9+9h7Ouevw5gqL3YbfT+Th6/FEZSCT/UQsplUZGT0lb7zxhpw5cwa/XSDxegK6CZ+oRO4wWYIff/PXfyM//eH35Bsv98m3v/4VXBJs7Sfy48/G5Qc/+LFi6+rgxw5J1V/SM6BnQM+AngE9A89QBqrwyiX48ef/eFPVP9pBVsh3v3nx2AEQ/XfjM3Qy6rt6YjKwKwBCGYcpgAgMSkr1BXokC8Ahnowo8INBaSuGzxWXFXUPGSBwInyuTsXAYBgAgERCA9IyUmoKBre9AQUq3K255cOfvys9z1+Q4OINCXe/CHknF5gWZIE4JQNNJMpX2VNLUg50SQrF1dTNHJgONiUDJXJNsSwoZ0VwJQ6PjRaksZzR4EPgCOdAcKTz1GuYO6SSUpzHNRR7BmDg3kBxTANRCIAQCGEQBGFQZityfgCASEMyMQBAkABjtFJlCSSy4j3bIyPeHinkS7JcmVsHTrICVgpkUJzBHom+fVlMsykxwHeCYxHMWZmCpBMAkfEb9+TyqRFlJk+QhT4q//zzH8jLF4alY+BL0jnYL6FKU0xdAJEQWTAoDASNUFdfBPhkCNB895z0mCNqXYbLD5YDDM07qK+PW3YNpGpl5mVl5pZEBkckCpN5yps1QoPy9W9+BebSKGhWsIO5iIS6R+XC0JfQvWyTiYlxMYJx0aimYNQMU3drh/QGhtV2FkqzkHyKwTcjC9YMTidIeFmWFlDo+Vy9PoYbCwPsmlxAUbx9whFk6W06sLgNHf3gSHTBswXoQ62aldjqPeUVEjX40cvegEdJUQqFW8pofT4O0OvFV+G7kpDi3DJYLeewJJg8bvps2MRnCMCYvEM8/ZAJA7ulXJ9R8+jsxXaQNxeMhJsArWJxGLxD6uvtsa/Kij0OEIZgFVgFMKc3eMBmWQM/eL7ZjShQoUO3WqlI0WGU6OWz4gZzhLJZedyTWWK0wCslPqUkTSx4TEP2ueqMkh7zBQdQ5Nr6wl1N7jGF22GR1y72SBCA2cYY6QJQhteOItgR29XtQjF1Wflp0Dtiv7EdA6SIQjilf3pKiyjoV5H/pvp7ftkjpzx4H0Jm6qAFeBaZWaxmsHhNc2ve2HHPbR7Gz4F5IMDBYjvjynC3fPu33xL/hzflJ5/flDkwtchQuHT5DcWaOcg+1OtVBRYMgEF1zq0Bmr4w3m34KDssQETviV/88h9ldTWuAIRvfHlEnj/TI1dv98hf/MO82i/uX3/f6IEBHOVdssZeIfjBcAEMvQdvn0w2c2AfFh7Xmzc+VCATg+DNt756CZ/5c/LerXEFinzy65/KSzChP6jUFnNPWTYyh+g1oubOewAgfL4z0nWg94EaCEFgLpmprANzfK7XEpJ5AESUxXpVMWQOBwcwxysrMRkeuSJW25qRUnsCJ/T+JDNAwvCs8gB85+c+u7tWwCR795/fhYzc+/jegTzmhe+ATTigzgurVQdATugptuW0qmhq4Gd6OhXH75zr8md/9ldy5cqg/NZvfk0Gh0d0EGTLrO3vyTb48eO/+287gh/0/Lg7Na/AD4YOfuwvz/rSegb0DOgZ0DOgZ+Bpz0CuVJV0vgJ1EU2RhvtrQ5NgFQzs4
w6dAXLcR0Df/rOYgV0BECZlCKBH+AW/pOeNChQgqHEDxSN2yROEuH4TMiu42A/ZNFYA5axWmmACTD3oLA+n7euASQV/p1HuJpAwf+uuAj/Y5d9M2MXlqygQIAbZGRsMyWnojb566YHZOYv19BzxeQdgQ21XclQLKIKnklOK/cAoQC6qA/W/uxPX1OOMEUXxQlAtm/YbARaU5fYUDNtRUCJzIpBehr59Hq9D4khaYvanlOwUgyACPUEGB/qULBdZIx0AVxhaHpxSKXdIDhdggs3P4Z5gEAEYRnjRKCvo8iw6S1LJLsjy9C3xRS8ij1pRnstwXy6f0gCVtrzWgMerwI+zY6Ni7gL4MTAmheV5KYPCFwnaIa0xK5PXbylmyFQmBRaNZmA+nb0LYkhUuD74JopxQkCExfxek0PlezUNJgtADQbnStZKK1mRySkYkQ/geeQrCK8VSw6AUxLcD6zfjWMcQ3M2wRoBCHH5lRcUWNLKxVQR2ljzStDQBbClKR4/WBeQhJoPxOHXYVdG7Klrn0hfP7xWrFgfnh9kkZCxsZwua74oAD/azBKCKNOYmz0Gb4/nx5T0lL/ZkLC1C0jKkgwV7OIAk6WF4xeA7Fa6ZoU5Pc6RABg5uA8ADLo5E5PZax9IEgVUAjeC3N+ZKatzzGOsYj8yil2ShrxWdagKDwBIboFVko5mJBmbFHqxFNK3cPzPiQFFKspz3bqxINnJCRROtQJn98V+cULfP3l7WvwlnKf5FbBtMPFel5g8NkkVa5h3TZbhI0Kghh3uxx0OfNlfORuR50YeZqTwRwBNwo4iWPQbHuqRhflxdJOjmGq17bugvxUDRAMo4gBAyxJaY39YrUbFBBmHJN9hiuTc73YRm+yPdhGbz7PjPgFwi9JYl5zefRf4KS9EAOFXH/zTOoBA9sR5MMSypzrkpdOj8t7nVyHF9M/y1a/+wYGL8G35qC73A8+JwSY+h1EknwOr5VzwYGboPG/vAdyYn7spZRTiX7twVl54/qKa/5uXhuWHv7qvQITr1z+Qy89/GWCgBr7s51ziNtr+GR0DeH9p5B58poOBk7coEIGG3w6AqPsBh1Tuc2kFLhFkIquHub8wos3x0mCfvPtrMA/Hf6328cpL7+z7+HI/eXzJ/jgFUJ/sDEabpUFGESXUDpKXdg43sksIyjFskOMjONc2W7daNd+r9jr7ua/XkX/IFzKinR1PjJzP4SCf/WRo/8sS+GiD3jMzM/LXf/3XcuduSk6f+pqcv/AKgM5OSOE9GV4r+9/7p3cNgvP8fuJ9tToKX7EzMjN9Rz76+EeyGPtL+aN/+S05cxbeZrok1oFPgr2CH9xAW/ZKZ34cON36inoG9AzoGdAzoGfgqc2AGQ1iA2j0/NpLg0qeuB1sBg37tMbi49x5nQFynNnXt/2sZmBXAIQd7cqAHGyHamb5/2fvTZ/bPO9swR9JAMRGbNzAfRMpSqRWa7Mly1si27GdONvt5KYnNT2p3Jmp2zM198NMzT8wVV01H6bq1u2puj2T7rmp3E53J7GTjpfE8SrZsWTJ2veFm7gvAEkABAiA4JzzgC8FQaQIypRIis9jqwgC7/ss531JAr/znHOkqxvFXRTUh8bS6gi/DRkaCIMeKRiX9zvGpcVRD2IB4oS8lFjK0sU4FpBN43csjPLyhtEXQsunZ6AAsErS7lXqAWoXWKQ390GFgHqssmqCioK2UEGQIb3IhTCNBMUMEmDCYVc5HFVSp8Yubq8VKwr2VKVchv0Sm6XELBYU9I1GgqErP00WdPTdlnZ3u/gbapH3bUYeCEgPKE1Y6DfOCMwi3wGqE1p05U9DTQF7L0fMJ/2pPkUe8PW4zy11kLz0Yvcy15nvweLneJ8rMiAlSATpCl7BbuB0wDrP4XqmUMBHOIfYfCBoUl51bjAwipAmh1TaQnL5NpQnV/C6PSIFULUMQFXBdqUTxBCL/yB6LPjlvZmB9FijJwXMrC6xRXuhggF8yN4wyI95APCARX220kqPyh9hboq1zgXLMZI+leIvRHB8oV8d0w8LrI6uM7INRFBFA+y1YnWKLKKqIlEQkAiUEeZURPKKkT8AhQSSwaXElichi1c8VhMIB5siU4YDKQkgoNwL8iMCiyySIk4X1DHYaIzIEpmYycO171eZIrhpZFPFZjndEREPrLusrjJx+v0yGUZhNA6/dOR8jOIr7wXPDKy08DiG8YjpIO4VXuMYip0O5KSgRqHaeBjnBEMykLoqFfm41rDs2lRjlaJt25RFFsmcqeRt6cFufBJ5QRiPcU8u7/MECbgQ8kOQsWIdD0oEuQ0z5Xni67VKyAe1B44bocoBx9FWLIWw9lBoTCldKtp3S+jUcWS3jKHAvgZ2GaB6HEEOyETGDgji44D6o9hlxY/cVy8psui3uaUFtkjvPXAI9EIKEO6Cp6UQFR9OBy330oSNE9ecz12d9Kki+YPmLbBInV3EJjYsmisVCArZVJksN9SaxWUWyC6cOzpPIDyzvQoFbLNsaaqRg9v98ueLV6Sz48Z8IPdyCv0khlKp1F32UULOeK5RRRGFioJF9Aq/ddkFfmZDnDt3TM3PVeRW863yoriL+e9qrRGDRDiF633w0DceKCuFyh6qV1rMeULSxmhUgrQ67XIG83/QwO+bNy/KpUunFMG1DYTHbpBOnHs1SOpXnm+XU9dvqrVxjSTslnt9Sd5QYUP1B1UZbKlUCAXSIvX9jUTwgS3UMq9tprqEY7D/OktKemEVyLB1O8g5/HpddiNJRPyvXj0p23Fti0seLLdn2QOvwAlrWQFiLK+vr2+e/Dh08JvS0rJ9Xg20HDJvBeDSXawAAjMz6b87/IrIK0VYM6PI4y2TE8ffll/+8+9Agohs3dqmlSAPgHeu5Eem8kOTHw8AtD5FI6AR0AhoBDQCGwABWvm3NxRLg99IGk4v2oSNn7QBX+2mFSCrfQX0+BsRgSUJEILCrA8WnZk9kYcQ6E6ERvuralXexHAYxWx/hVwOjKld9R5rXMy3x1EktkgpSAIGn3ulSHry0wXgOncxisvFcq3ntMLbiaI9nd8LXGEZ749J77kzqkjvh+rgyz+fUAXpVF0zgrDjilzpAAFCxYeESmQadjHMH9kF9UG5u06+ZKHuagyZJbCdQHC3M1CgiAEr1ANdgRFEPkQkNYvnEZJN0oaNxe/A1QtKDTKBneQMBp9E8b0wf1R2bj+kiIY8dwWUG+XSNwtmoxBEzTTslPBwanIQde8YlACtMtZ/U5E4JDSGBvuVWiKMg9qqSDA0YaRiFEBnmNss1gb4fgcd0gi1QjB4Xc0jMpm2wXDFp6RbYJFRUyRO2GjYsaN4cPDGnEIlTQb1TcKqCS14AfkUUEGkoNIIIzOFWI7ZfDI70CXxSackHHkSQfi6A7ZUI9NJdT2Ck1G1pq7L47BzmhEnskxMrjiuSYOyGisAmeBHaDJtyqjuOdTaIjFrWKpRBC51+aUTNl6zjhEU0akiQKB9Y5EijQRKC34t8QTSOSJesDDxEAgQBL8XgOVI4fihqLpX2hA8xftpPB/5E8gA2VXn
lA4qZxI+Sc5MSWVBoRx+cbsMBgvxOhQcEN/MxEdglZUmwKi2IXGGFBnMF8oStPOBLvV1BpZaXF9lXYuy7xrqOiWXrtzA1ma/bHEVKTLE7rKLz+eE6mFaem99qtZMey/aX40UpJUajW3NYgKG3f3IDBmqlMrGCmmY8ClSxJsPay4QTNypbq8sgf1Vg0xPXVdhtlazSYKwGQsj0FmsUXF57uzIVxNcxUby44PT3XLqavr+MaayvalUXnuqUdzA7qs2EiDNLc24L0/M5VpUL5uUyFaAGPZU/fj53ATFB5UfbKk8Kx6DDHFPyc3+gGwvmcCua/uyw8qNbJHMIrbqH4Vss4mFZqhAkPHDQrMTFnnLCUMngcDdwUND/fMEAovvbOUIXtsJpde2+psqUJwh4/sPvCIN9ZvV67k07hwJIlck2z5qGvlHhcM+paKog+WfYcW0nDBx4t5zG3M7/ZkiIBge/vyTW9S82aikMEgEKixIIrS07FqWjRfHoHqH5FNr5R0btsz5UwXyIIHf8TjsUa6dlOvXTqvgeWZ/kHRi4xpIhmyu9CvsuUYWqNva9uYC+/wxypYN5BhJMpMpPX+SE2xUaVSX4ncyyPEJ/F5crsUW7zPmlxjh54a6xCBYFDmH3CQSgxX+Ozlcy1kA33gPDPZIf3+XvPTSayCh187vq6XW8dXp2qVG+Gqvs5j7p/f+pJQfvLdaW3cr1YcmPr4armvtbP5Orcf7DdqZffD+P8nv/vWPIIJLpBzvOXTLHYHlkh//5Tfv6cyP3OHVR2oENAIaAY2ARmDDIZCHz8nTUH7EEIA+MzO3MxYoFECpW5CfD8Xu6kKiFSCri78efWMisCQBUjBX1Bm8iWBrhNPmo9jiLPLAmHxKOscCkkDBcdaJojLyFNzYXU+SAfEf4gIBMJ7vlTAK254oFByRIRUWPY4d29HxaWX9xOLyzdsjYq4sQ5HcpHbpg/EQT96gXL/cJfl+twQTIAIQks3d9WAepLF+lypCU/XRh6JbYdyisjMGO3tUHkQhivgsrIfxHxuL5rTTSnpm1M7/ifEk1AbjUlXrgY3XRWmbuIT+oHpAnZ3lq+mubrxaLC0onitrLdeueeuuyRkoSpjlALFDdVmpDNoiysop2d0lDFNiiyfH1JhqLWjJCCyBYFFVmg/iB4qUjqvXUPCzymZYQjGHJK38wO5nLwv2W8UxEZI+SEgmUaBPgGzpn6Il17iyAsuDtZjYQYKgwM/vFSZoVMpIIA6CipHpIEYwZvvWNDEQxDEkdoZ6YxIGEZKssIndVKiIGgvUPR43xpkNKPKD8/Y401kffLwJZED1zpfFX7wNWRxQn1xPB8BbQPqQR/eAdCKBhNhrrCMg5jyf6ouqj6aqKjUXtlZTemd0FFZbDRV14gUZM9t5SZEvnmYnMlxS4kUuB/NXqDQawnXxljETpRjqljAUMtOSHLosvolxsaHQWFid7o+ZMtVlu2QiFJai/n6ojIZRnB6GiCRtcdNx9gKwKZPWpw8q7AO4Dxur04VC5tnEgyNigwVYcSGuFRQrpTu3KlXS1K0rMn1hSop2HZCa/L3KRkuF04N4YiaIvaZc/PaUXOvoktuQsFh8XcgWYYB7+noUm1zIUJnbaWBCgHzybsupeWAe8QP+8e/sC8nRc2k1EYe3mNNkwtf31kFttTLNj8LPvifa5c3ffiZb2w4sOwQ6WwFi2FNlqz/ykXdBEqTUPCV9UD08SJGcK2YRO7vIzOeNQjY9+mmNRZVCNZQEueaaZBMIVFB0j4zL7z88NQ90D4gbtuRMUq5cOansmioQkp6rCoQFbCoQGH6eaR9F8oONKoph2GJ1g2CgVZMXeUC5EjjE/fSXH0gnflaN9uHnV+Rm9x0C7dyVjvT8cewXJ/4g+/YeWVahPzM/o9CGvytz6pXM+T9omHtvXxesxU6k1SP4O7UY9lwA10gVROYO/flFL/JAWXeBvCH2dSDZDXsq4/BMlQYtvrhTfDkEFPvvRd4W+292pn+fs2/jvuR4zKv5IjSkFD4lpch7Wkaj+iMGRduF85/Jpk0uWAE2rivrnrWuADkLa7tz58/Jjh0vq/uK1/5+5AevR3a73/HZxy73+4XGYx8rOeajGGO5634Yx/N3aoW/Rp586mX56KNfy7Fjn8lr33x1TWR/PYz1rnSfuZIfHJeZH5r8WOkroPvTCGgENAIaAY3A44cASY8bvePyx+PdMhW7Y83PjWuvPtUgjZUrVf14MOy0AuTBcNNnaQS+CgJLEiC0wOqeQtD2nGIiZZ4Uc2RWZmMRCcXiMhwdFsd4AtZE2BUMEsSLqsTwQK/YN7ekd/5jdh+imN6USkqeGTuVb6Sp1nEoE0ZRUBxmcRm5DsiKxm56BJ9zNQgTZ36Eo8QDlygoI2CzdDswKKmoTcwgXthMIfSDenMk6QVB0C9RWBGw7N2PkKMgwq+rQNKYQB6UzmDH9qY6VZymisCeBNOBRuVBMdQlkdiYsFad525TllFUfSRgr9UAW6fenqh8fv6sFEe6pBoZEbRichX4YRMFJcRAt8qWMAyzWNzf0tAC8gSEQnUlyJa0soNKFBbexYbj2ZB3ET7fI192DsggClglWyulcS5U/cyVM4r1EAMUAAAgAElEQVSYYP9sJFzGp8BO43EEhXZvdan0d3Qq0onB2xNQtRQWMJh7ShwhqD7yK5Xd0gx2Bk9aisQFBUYfslqoomEbj4L59kbEZIZXCuquBVj38Fg3dokXKYLAE0vv7KYiohq1tFGQWAmsswIKEBIbQ92wIIKtWbD7lpRCCTQBhQPtt/w2vxQ6kf9hpv99FYijq2o8NpIi9VD9UDUiyA0g0xQByUGLLhIbs0GGiUIfgyB7kh/jNy6qnJIyD/JJKhGS7gXBFsyXOIrC+dYmqd/KTIY0iUG1THi4RxIgYUh+8Po5WEOdIFHToBQslflW2VQHJU2+7054/Nzc+vE3z41xg5/cSj8DMqZtd6PMuNI/FmGQQ3Q0s8bqpMQ/o9RG0+N14qkol7PXjgLzgJQ4CqUItmWzsyiwwk4M4hKloFFWXL5Z3HMkQlbf/ooLNGGnQxVybLY1pgmkORhUdgFfW6lGFci29jY5+ukXcvnS8WWHQGcrQFi4p8KDlNoI37uM4j6F6kMmAPbc1yjIugsdw0qtwULUcgrNzPe4BflSAL93SIQs1PhaEL9bmmz4XVBVnxMJwuJ+V+cVkK6wp0OjiuIf3z0h//KnM/ND8BiSH2yRcFhOnfpY9u8/ogiQXBqtwahA6B+LyGnYMPXid9wwfvbL8LNvfO0GSaReL+qW/fidk4vNEwuXxL0DeUkTUJiwKaUE/lFNYbTM+dNKqrfvlrS378sJH/ZhXFs+voU58ydxofl3Dkyo+T8DS69cCaihoW7Yj11QU2V+yf2wH4O659r182o+uSo1UvibdhpKrzT2UHxR9bVA4+tfyJC43VDkLIOAonrIuLbHzXN/Pxbp/7R5QHzIcPKacldw0J6NCh+qP/7N958VBnevp7YAX7Bmpj8Gi9BTp07iZ6VGWavd754lUcqfI9rwjQzDXhM
bDNhoX1dSUrls4iwXEBjizXt9dLQfv3fS74mM8YowLn/GcyVKFxqP5B3vXxJzzJcx1mSxWKEyKlV2hWbz8i35FhprrTzHvzm1NZuQ87JbPj9xSvbu2yN1dXeIy7Uyz7U2j1zJD8P2yiA/jjzZJnVldywT19q69Hw0AhoBjYBGQCOgEVhdBJIzs9KNz5DHLvaivpT+jM/Nn7XlLjnQVr7qBIhWgKzu/aFH35gILE2A4IOsaezOjl+31S+znhAK8rAbguKjGDXI2AxUHLC/ckGJYUfIdJlUz6NJxQUNoFyOMqljIS5xp0hUVlEtialRpR5J9KOAjeOKE3651DEA+4ACqUnWibXRLa5pFNWDnVILEmIyEpcRhO7eGBiTnSRlBlGAQgW9tnRcriEfItIVEW99evfzrXOD0tS2Vcq9yKRAQPYgdhebaJQ+liYuCmHTNTEElQGe94J6IZFgd5fLGEiSG7S4coDo8YFoAWlhcSN8HQV7ZmCwKcsk7Ox3YYc/H7dv3q/UEFYQICrwA43kB8mNsuJyRTSweUIJudTbJ3UNO6RkCwmBdCNBwHFNIbv0Dl8QMwrnTmdCqQ9CULyw2WD3k4CFVAJh4lNCvKbEaUsq8kJKmlV2iW0Ctlaw+BqeuiWXzvfKTBA7tBMlUFtUwP4JhQ7CDyXJGMLFy5CjEDVVSd6sF3ZQA+KvL8YanSpI/PrYachn0nMbnwjLIObNlY1AzcICBnNRIrHbMopCbWoWJBaKb5eGO6S8JY19EKoTtpnJWRn3hSWOghCJFqo82PLr3DKCY6Yv3FCh7CQ4mP8SCAHjqagKm3dBAcKweTZ+n18Xk8k8O0gH5HeYHXLr7GVYrI1AXeOUsioQKKl0+DuPTY11SYiKGZTNS11VUmAuR0FpSE4PpItLFQiX98YcSoUzUYp1gzii+qM8glB7a74iMApA+nAd8VHkZgwWqLyUcXeTFOIeKIlGpKi+CQH0sB9TBJYDqpiYIj82N1XJzABUCbBMG7fAogjPr4Xmdlrk8I5qaW+8u9Dpsltgf3WnqP1V58qd2VU1NXL40D55+91TUt+1RZoa23MmJbIVIJyPt8gqY5ZaKHbwKwuWaKplfKWqiG04LyQVcy/n+qUeBF6gnhk2izcqQEgK3q+QmX12fr5JXCh62x0OsYK0ZJGTzfhqHG+C+o3NgmyKwkLrvJVSdn/Z37NwWoBzd5dVKIUKW9iK8bK+ku6iVZINKiezObfrbOwCZzHU7fHNkyAcY7H5mxF4X4gC53KaHb+PtzWWqWC69G9WEOH3mX+ufRObQosNc/cD++4lsWfBd7lz5/WtLnfOY7/Y3LbVI8Ad2C+nGdd2mxfXc+7aLnY++2eY33LbxGRAKXyaW8plx84d6263+lpWgNy6dUv6+seksfGZ+xKOvM59/d1y5szHyAA6LsNDvciQGsLPaREUoXapQUF99+5nZM+eZ+/bT67XnuMFx0fl3NlP5fTpT+Q2CDCOx1ZUVC5l5dWy54lnZeeuww+U58P+w1BzMnvnPJRFHZ1XYQmKTQqJ9PsBrqvcXyuNDa2yGZZgJIeWayuY61pX4zgS1/VQtl6/cVouXbyE61ers0DucyGmpqbkjd+8IX/63b/IN/bVyOsvPYtMsoV/l2nlx32A1C9pBDQCGgGNgEZAI7AgAkU2i1T48DnYcsfvqrLYAReM5X02W7Dzr/ikVoB8RQD16RqBB0Agp5982k3lOxBHPmGS/MGbKgDd7oDCAERDKjYhnpGkpGrqxRVGkdyJUFgoQEpiKD6DOKASQ2VAQHIwYS6TGYR9kxhgc86AwOgwS4FzVqaiRbCgguc+duE3OPeKKRxQu+kLwgjatpulyVmDoFcqDMYliaLQvid8KoCbrbzaqiyeZpKFyFywiL+kRez4IOp9phq7Jz0qmDsajUsqH4XqoT5oEApVDkR4Mg9jxpWlEe217FE7Mk1CilhQc8Rm2hJvpXgQzB0AaTyjlHNQWIDwYB7KpANFU4xJYoB2S2zMEgmCyOD5JH8sxcVCAsECGyjaY/lg33Tgtb+CgqZbkSrMvmCBfRhB52U+7sKPASksHLVSkh8MPPfD2stmcyryxZfXqXJVAr1QrqA4SfKDxAIL9fy6H2QAW0++W8q/vV1KYKnFc5WaZHxCSmHdxO8nBi8gIN4pBbVVUpQckCLkqYTH7cpKrAOKmrgN1xKZIrR+GgXJ5IYqpABjkWwQKDpcM6XSGTujiID63XOqjGHAez2QJmQwB1pUjZvSZc3S/DrYxYQl1Y1iT9+AOFG0pY3YDPI/YsDzJp7zTATFidD5wEwBFCx2iYN0iEajSqUxEcEFwL1DJQoeYE4FijCJDwdlwo/8FqhbwknstGcdfC7cPd+DYo+/TmaH42KynpZkrFQRG0lPmQwiu4aYzcI6ixZicbA9pQiAJ4lFfPiPVmelxYUSw3WORruVOw9c2YTkyeamH8iF05/K6CSsx3AMlSKtUFJMpG9JKaiwSy2OZw5JIUgWE3ber3aj32VliUPKoKrJbKaCPOWFuVKNO7OpAjlwYL/c6rgln//5XfGANCsrpUZr6ZatAKF10Nebtyx9Io5gQT1/mWoW7sp/Af8YJp7LubnujGZfzMT46b/7P6Sv9+aS87eAJGlsaBOSDgYBcb+TOA8W7qsq65TqhY1jch2ZjzPXxHNYpMxlDcT9xZd+JJs370aRNK0Cud98OH9ani2HJGJo/VP1O1S3Bv7G/LPXkMucjfnx2E2b2uW//av/PSfseV5r61612z7Xxh3fuyu3yWxFOoh+oXsn+57KdQ3GtWW2QH0GNtlzM/o3mWbxO2sKhebsIxb+ngqAUyc/ADF0W77+tR/id1puP5sL97Y6z65lBUhP9238zORLdXXLouBQJUEFzlu//5n8+bO3VE4QSUQfNiBEo6MyORmU8+c+lcuXP5NAYECOHPnhVyZBRkYH5N13fi5Hj/5GqNi6M15Uent7cE98JBfOHZWXXv6xvPyNHy+LBOF6BgZvy6fH3pCPP35L2colkMNjjEEgAsEelclz/PNCYFMrhw9/V1742g+WrdpbFNQ18EJJcRkULhXIfrmKtb2w7ojFRwVhrsoPzuf05Vvztlda+fGorpAeRyOgEdAIaAQ0AusbAQvCztuaSuSnjm0ynd6HqBZU5DDdE4y+GivVCpDVQF2PudERWLIqyyKPx4riCJQOw1O9crEjLBPIAWlsapLwmW7xbXaIvbYSRIcdH9hRXM6HPROUHfX+bdhNWCbdkV6ZunlN+qD0GEBpn0qR4T5YHIGMqG5uQuHFr3Y8O4ugdrC5lYLEW4AqDogE2kFFYfOUMFcoOydaDE3EBrHTPm0PQ1sohlNXNDBfo1uGAykpq89H4R3Ffniuw51IQoEe7LBEMb4UQeYuh8S6b0CN4pDw9fSuRx+KZFaHF6qKpAwNY+flnAk9VRQkbhzlXTI+J4CxFW8RS2BCes0oxheEoJ9wI3Tbg+yMbjEhnD0041Kh13EUwvNAMk9FJuXqGAgLECFuj0kpM0wgXZKuXikGocAQcSpEZpBB0jkBhYjy/0
IN347Y+OoaNZOZeJfYUbR3O6ZgPXVRbrO4P5evQTsqr8oOgZJBjoFcmBAnFAkBkB8WbI+tKfRI+56DUG9MSax/QF0PaxJh8wNDMulCOD0IhRqEp6f3ZoKQ8kwpm6e8PNhqzW2IZ6YGiaFkibE/O23vNIJQqYl4sVTX2mQEc6LqwgQFEDM7SPSQ8Om9lg5VV2oZ2Ifx+TjIm8bWKvW6r7ZJUu4hqGPCUgXFhC06IyU7ds+rSBi+7s63KSUIDpI8FXwOCqyXuSiDYgNpMYWxGTjPTBlalNGKLIKMGaS0kCuDquOsWCLI/wBhRPLJNGGWSlhYUZMxFL8tEeTYPFvkksuw3YrkmWSzu1tqGqoVwULyosxnlZ5+hCn3pBU8+7DOPKhfBkZHpURKZbLOhVycEYl2XpZhEmlosyA8GHpPJU0KYwgCtNdCo/flpc5R6RpMW54Yc6r3u6StoUQcS+w0X+4aynA9jhw5Iv/w//1STpx4D4XW7+cUiJ6tAOHvoExbIhbw79dyLTIbfRjHL2ablSthkD0n9st5P3ngCEiHI/i5SvvrL/aV5xuvZfe12PcLzT1zHQutKVd8eO7mlh3q3xKQq+lx7my5HJu5nuw1ZM85+/vMc+/3mFZfuWK/XNyz52/MMfteedC5s//M65TZT+YYfH6pn4dsjFiovnjxC+nsuiRHvnYIGRU71uUu9fv/Fshe9aP7nru5xrDxgI1WTws13m9U4Jw4/jaCs38FBdSUPP3M63LwqZeUaozt2tXTiqi4dvUySMVfS1X1Jtm754WclXTZ48ZgW8rfw3949+eKbNm3/0U5eOhlKZ0jpknS/ulPv5aLF06oYyoq6uW5576dM6FJ8uPtt38m7779C0XeNDQ2Y4yXlCWUDbacbFF4nVIdwbwgrmsMf0fZ7ke2GL9XMteznN8xX/X8bBzv9z3nZbe7QFT58L7zAgI3Z+53+IZ9LVfyYyHbq9ba9WXVt2Evsl64RkAjoBHQCGgEVhkBEgylbpsUu+51KMhf6A3iI56vVoA8YsD1cBoBILAkAWKgFBlNW6fsOdiqnioYhz0UVCHJsnFV4CdZ4HShwI8d2FWWaknmucRauknakT8xBAKkAnZZEzabsrmiqgBMhgrdHusYl3KoJDgRp61c3CZkdTiDwhwKBnSHBwclD0RI39UedUxNjV8RI6nOSbE3Nqiche6JJIrhWAwyP8RXrOymurqgNoA6hcHnNz+7hlr4rDz99GHxwvaq2F8ipsY2uT0wjB2JCHVHPkgYyg+2JKyhUgUp8SAwOgwSIzJUr54neRENjclQAB/YwV146prEnTAhVwJLwRipGrdU0h7MnZR4JKlUIxOOpAz0pUmfCfj1eMqw8x4TDcchlYBSwQRigQ3iARkvMqtjiZMTTw+MXVF9UHnBbIsPL98GcXFVGlr2SirJwO0kgsuD+HdC4ciMklQ0IAHstJ+dLeMUlWLkYnevDID8kLwyhPQGJQACiIoH022znDv2uaQQ/N2Ef53BPtiPwZ8baovL6Jf48zoFEDRdgTyHfGSisKUmhsSD63R29Jb42hyKs+k5Pihe5KU0bisXa4Ae307pw3U78acPldUXM0iqypwCky/VBxUv7sIRkB/OObJApO2pp9RreQhEv9HDaxFSapO4rQ7KFCp2SmUcZAXJmymoYtjKirZKtLJagghHH7NPSnEAZBLJFmWikw6Hn4bax4LQd9+cKxttzqh+Ka/fA8VGOXBHXgpsv0rzn5Rqj0MpRC52jyDOJi6WuFXOfzlXZiuuknFct2t9UZm6fVrqYZdVvW8vbLzGZBRjljLTJJRSNlrXoO5xWoMqf6Q/ZQcRsrxMCjX5h9DC0YQcvzQk753quqv3p9urpQ7SlpUmQFgPb25ulheeOyi//dfP5fPP35Mnn0znW9yvgJWtAMmGItcCfvZ5D/r9Vx3PWOtSXzm/++HyoPP/KuctZz7LOfarzGk55y6Fefbry+l7oWO/6r2yUJ/Zz2WPkf4+NzqABdlbHRfli5PvyY7tFfLc88+u2x3qa1UBkkgg1weZM9ZCh7KoW6jxvmP+xvHjHyqygOTHj370H5RVoMmUDktP20O5QBL8n0pNQUKEz9Eai+H1bHy8kOLKyODgXGh7Vwh1FvM4mDFE8mP7jkPyF3/x17JlC8iJubyhaPSgKtwzs4jH0JJrz94XcsrECYWCIHPekY8//I1aD8mV17/9E6WAo8LO2OHGD3o7dpJ0e1r+6Zf/USlcMskWCxQwmb9HSNpMRaegVEogRwR/ky0WtWGGSj+uKftngcomYsM1M2OEbWpqEgRTdP58m9V2F26ZWBHPxfJPSDTS/i+z/4UITl4/45okk2lrxoXugY363IPaXlH5ocmPjXrX6HVrBDQCGgGNgEZg+QjwfedgICJXuwIyjdoeW6GZ79PMsrXeK8XMMF7FphUgqwi+HnrDIrDwp/MsOHqHYWuED/NmhJSnomlSYRK5G1N9PVIu28SLYPS+Pu7ku2OTEgwOy768V8UbB0kwjA+lMyHsmEdD3gfVELNlPsmHV3QchIBhqRXqQpE/BsUJdvo3IUuCCoLx/AIJhyJQZiBZIwSbpwlYaqHgXOD2qp38ocIeiaMIzcYd/mxFt6GSgKc8W329UzYVHJAgcjS8eVCWgHSgEoFsSg3O7zKDBBk9JSR4lD3X8IT4JvB60xaV1zEeTuc3lLXCbmk0hPnB8svSJLYijAU1CG26ErBrKiqrUQQJi+Re5J0k8aGdChAqXkq8RYa4Q/wouPuSyLpAMDoJlAlzUmV/TISSKgtFiUBAJpFUyp8dRfi8H7ZQpVI4i5Dy4kqoYWakqKQKhA0CypEFcgP+2hyDKp0wSJb62Ts7TgcRqpqaTOeIMC5lAgqd2wh0N4+5EPKe3mUaAbnARvKDSg42Yl4NkimZQuZKf4+k+m7DLit9fHN+MZQWU9JqTasdaO/VvrVEha4PUqEDxY4znlSkjK9iS5r8qKlTQfXT4zdkSKkzQE7U7xIkREsIeS40X7rZDF4JFl4pFOjz+RwCz2E0BkuxIfE31CrLqlkQOn23u9V90FhVI9fzSW/gPNicFabSBaESS4X0IfyYQfY7cX0FhImnsFxgjqXUIcxnYbh9YPa6ItJIsEVB9pDIKoMSKBK/KZFeL2zVCsUE8gIiGtX8+bPqukVNUCnh+y7cY13nTgtzZKhAQcqN+hpGuH2y1Dt331wUzscKy6y11qZxjdgKLSahMuRhNVphHXr6EO69cfnjHz9XO4F37jh4XyXIvQqQFOyR7lh0sUCWqTZYaANHdh+LrW8psiVzrMw+cu1/sXFX6vml5r/QOGtl7pwb58/55LqOtTL35c7buA7Lmf9KY2KQH7Skq6vzyKuvviLF2HywXltulM/aXJ2hDGDGBxWczz7zLRWgbZAfnDUVTLRl81c0KNuoSShGWMgfHRtW2S18D7B9+0FFYrAPo7FvWmtlH8PXac9EcmLv3mfmzzMIBxIKtLCrq9+mCJDJ0DjIA2SqgcDIJhqyUe3puSnHjr2DTJFuad+2X5EfTyC7xCA0jDHYT
zHsNvk3gGoQNhIK0wh+n5nh36T0+xESGUPDA3dlo/A4EhRuzGf7tn2y+4kXpLqqfp5s4Lo7u66qdZP84OvjeB9Ksu/K5TNqHCNX5eChb8znj4wFhpQyJjA2qHJJFvv7xGyTS5dPKiKK6phcyaFsrDby91R+vP32Wyrz47n2kkUzP7TyYyPfJXrtGgGNgEZAI6ARWBkEoqh3kPz4+z9cQm3mTt5nA9wvfvLqtlUnQLQCZGWus+5FI7AcBHIiQDwowHPXFoPMg9GkmGFHNNw3IgU+jxTh+QH4PUVCUDWgeb1lUmUvUUX5ri+Pyg1me9hCkhcA60pZAhqL+/A0UjvzxI9i+HCRFOA8u92uxsnDB9zIZLqwHIYyQuJhMSfyQAY4ZHgmLuNK8YD88+lCZXPFwjkbMx24Cz+EHIcdeKEQp86wqB4ireBRJAgL1r1XeT6LvoPiD0IdgVZW7FfKiBLriHR1z8goitnMwCCpQsWCytmAKsETQ8x5JATLLocyNqJKwmuyymSyT0LIoRCoRny2dI5HM8I+p+wIVgdLwmQOkiN+aVDZGAKVSoBKGLR0Rop6KN5Zi7gsJeJF2DNtu2yRoHSN9okPpIfd3gBP7X6oHEaQIRKSsC2sSBOW/mHgpM7vniqCOgS2TMgPYVF/NtYHSyjaVw0ouzASJZaSUkHuNdbUIOOwHevos4L4mYWSIc2Mm5DvMjvtk6lEVBE8fQOD4kD4vL/EK1dAoKStyaZBdqRVM2UFuFYgspAxpRQ0YVuR1NHiC85pVkcFrkNcqVji+ZUgB8ZlNIjzQKp4JxEuj2D5PrNdqoHxKALTZzCHsLlG5Vura91/TnbBhouqlrF8i8prSQTscnMmKKl+WH4hS4TZKeFe3Eu4ZpOmOnHhGM6RuSddIGRuHftEWuvrJYkw+tKEHbRKXClFogEQFSB7PHjsgu3YbH+XTCPTo3/kMoo+z0gEmTMO4EoiiURVEte32DShHrsxxu3AoAzfuKUIrvy+PhBz+bg3R8UJAoQkHnNpwp4RkSu4zTdtWfWd1m78TBzeWSXNVdQc3WnFHip/Fg4evevAB/zG5XLJK6+8qs7+6ONj6iuLTNx5nLnb1+h+ZiZfsMlXNRt+fnTTCGgElkYgD7lNAlI9u5EgTCbTyo+PPvq1Ij/+8kffUwHN67mtVQVILphSOUDC47/58f+mDncVuZWiIfv3oRmKB6XygDLCAjKZ6geqC65dPy9fHP+jChl34O89lSOGGoEF/ffe+4VSY5T7GxVJQiVKOf5W/ugv/1elpqAKguNlNoPkKMRrRqPiYinyg2RFBxQqXfjncDqVyo9KFc4zez3sl8+RsGGwe1lZNdZmkZKSSkVa8DWDyPj9v/4DMkneUzlgxSV4T4kAdZIYQ1e/kHNnPpHzF76Q17/1E2lv36fGSqWgiu3tgI3XL9X0+/H3vK+vG9lnIwpDtqHBDrly5aTcuHEeCpj/SQ6BCKGyhGoX4rlj1zNSXl6HLKbN98x9AJtNfvvmz+TqlS+Q7fF9FRSvW+4I5Gp7xR4zA8+18iN3jPWRGgGNgEZAI6AR0AjcQaAA2Zgh5ACT/BgdjyL4PL2hMojv44nVV+lqBYi+WzUCjx6Be6slC8yhCPket2/fFjsyIswoUhdg517UbhVfoVWRFtsRHDsBywGPDQXgWL/K6LBvbpGua9cliuJ5HERJGXbkRwdnxO2PKlsqNhcyIwqwqy6/yoUP41ZxoC+O5YWdUGAwpFQZo7f7JEF5GorrcRATFkta7YHIV/FCGeIOmOZDyS2pfimpOQCaohe/5CbxIR8B1gjlZgvnDcrNyRHZ5C6VXVUV6rlOhG7Pgiwp8bjwgbdRwihej07PSpG/WlLIqjAaszZG5or9rciouHijUyxQkUzgA7sfeSK0Tap1tIHYCKuQbhIxTVUoVOQ1KDsuqj2o/OgFR0RrJeM5qkhsIE7cCTAF4AsUYQELLR47UWSVZFGhDKHokUyCVMBuT7cL9k6zJoWNQXhQTeP2+CQ4HpeCIdh3ua5BGVKhskZ4THEkTwah1CkEaeLf9G+ldUebWta1a5egBoFy5OZF6ei4hmuBAPnyEqVcwdZMSeQlZTgfyht868D1dXuqUGRAgQ2ExgQol6FAtyJ2OA6JirxkDMQLgq4taQWKGeFS165fldLUkAzh2pMkoSpDYgjmQKPqRxIgC2rapA7kElUwUyAU3FC80P6KfbKZYy45NzYlwwOfqvuA5Bob7c3YSB6ZolDcgMBgM9mSSkUTh7HYUNdNGWaIfFWtIk5SyJ0ZjyH0HvkyJG/CiSmsrUziIK0SUzEJgZRhK7JaJHTrospG8bXuVMeOe0NSZ6pUhM4Ecj3iPre4QIQQgzB2qk7OkR8eFGq8Mx4QISncWxEoTCLSD1zWQiu0FEg78nJaa9NqHs6JAehsKxmCvtBaSYIcOfKieokkCHf/0gplsd3FyUTaco/Hm4FzYoHC7kLj6Oc0AhsWAfyMZP7cGDhEoxEUfZG7gGIyyY8f/ptvKfJjvb/pXs8KEF4bEsCG/RS/v6OSSF85XreuziuqaG9DAb+ysklZPxWB8HjywIty6+aXighobGhVBALzhkhGnD1zVGVskDg4cOB5KDpaFUHARvWF0bLHiyOwvLf3uvTevqQIF/ZLomSpFsJ7ky5kYI1iY8rm1q1KRcEcjKWaE/lprTiWxA1JOmM+JHBI1L333i9VIPy3v/Pfy5NPvSwevC+MYPMJFR7vvPMPCFJ/F8SRR0qhvq2prp8fjrkitOFia9r0hFKjGGH0Z858DBLjb5Wi5vTpT6StfT/eV7mx1hb55OM31dq5FpJTmfZWxJXXorvrguq3srIef7vWr3pqHqxH9CBX5Qenkx14rm2vHtFF0sNoBDQCGj7TYocAACAASURBVAGNgEbgMUOAdY66Crcc2VMPFfUdwsOHWlCJ++6NQKuxdK0AWQ3U9ZgbHYGcCBAW5RlWHh4anyuUV4mtK6J2I7JFYM0QjyckjCwGUyIo4/yMDeco7oyXRuzkT41JBMVnZQWFHJBEf6/44Y1cDAWCHZkeXbA/GBmdks0gQMxQUwSxY29oaBTZDvxFRauldLHbApKkKGxVdlyJvrBMYl4RZpGEutU8qGg4OT4q4TEUsDE+C9kp87TU+GAjhdc31SDLA6qOKAoFhh1SxOtDkHedzCIgfDIF5QYsmgpcYRWozsK2VBShGJ9K54sgByLQm7aJ4uvFJlNaHYACeR4K6FReODwWpRgZGg0gR2MWKgHYNk11SRJqEE9qRs6cHlAEkc9fAQVJXJEjMtktVyZDUmNuFKcb5yuLriqZgq92pWtSgsl8HJeUqDsfz4GRKLdJvS1duB6ynlc5JVTnFCCPxYUrStRUlgnsvKIVFTI7WSA9sHPyx3uhkPFIAYolRd4mqQZ+k8DZMmGSYB4UPjgvlEoXnrvOvCvFze3S5KlBIHq+zCagZLB5xAHCx1voE4tvQrr7rykyq6qkRalTvF5kwcCeKpkP0qN3WhFiCRhGMa8kOATyjBcJZAPVQMVNHskDuWG0YhMe439HKggr
LxPIIuCHIjlD5WOYX1kFsj7y4mJDlsn0dEyGg73pftBBsMQneSOzUgdlQ7F3k9opGwwEEGR+FY/tYnH4FPlhqcG8cTxxTYynZCwYlMl8hK+DyDLDsz0JcswB9Ys9hLXOtcDVs5iUHUHn+Sqbpg9EDUmfoaFr8xkzA6mEWPCcgPxJlnrEWt1MYZNS4UzBKisevtPffMer8GAG9/HYZEwiyALJbA6bWYWDmQrukH4PY3q026ESxIxMls/+fBQYdsr+A6/cY/3CsakCSaWsyh4pFi1UxVrDPiiZzMPvHtzfc/ZJxvcb9SvxMtbOx4vhknncWsVqPcwxG7tHMef7jZH9GovJ/Mc31SOjA6ogzsBzZn7Q9upxID+45vRfQD5any1bHcG8C+Z0sNhO+0yqKj54/9fIKRuTZ579tmzevGs+94LkcU/Pd+XNN/6zCkon6UDLKVpAffbnP4DI6FHn8PcrSWajZY7JcYJQN5JU4Hh9fbfkww9BAuDcXbueVcqRXIgM2mTRlov5GKWljeLzlecU1E5liUEyGPOi+qODGTUgcBIgZJ59/rvzAen8G8Dj3AiJ54aQf/zH/0spRHZj3RV+vLnLaHw/yrl865vMIdk+b8VlsRwB+XFWOjtuKBUI1SHFUM/UN7SDJKlVa+/puabUybQgY+PPEkke/gzxWjQ0IIsC1mQLZa/cNQn9jUKAqm7aXr31z7+Qb+yrWdT2isdq8kPfNBoBjYBGQCOgEdAIrBQCrG1w82dz9Zyn+VzHVIZYTA+37pHLGtb7ZrRc1qiP0QisNQRyIkBo6RSeGIGJFKwLXOkPhQJFRcrsxs78XrEWlCpiZHBqXBLFoBpQEGbjbv2EG4+DyKvA7n1aHyXw2py7jcpuGEfQtDlIZcmYdOBfDciIhM8uuyt3oxCQkL6ZCSkIwloKbdyZksnBARTakdWAD+0F43OFXHP6A34VsybAvFD5UJQwiuuTyqqIigGOb67Mk8SxWyBlJpVdF0PHL41OSv/kLTVGEAXuMHItmMnRN9YHqyoQK7TnYnYJLKwuX7w6f55tJqpUDeOwRjJBTRCPp+A1fVsV7mnpFeo9IUMeeGjj8RTkHcUgYmwo6IZRwE8MD0ERM5xWENDmCw5OY5ZRsUaBMogDN9aRKKJShfkSeApqERbu7fhQzn+zwYtC8qM8tl2lqOd5scszNKDIkzxvu5R77TIEXHlOPc4voVICj8eHz6nr6MLGzgTyPNjcVpPKE0kWELOAFOXHJYJryq9DmOdox1WZQXZDWyusu9CoiBhgXktZSpqhgLAzRBUkQjAYk2jCDCIJyocbp8TZgPknfcpmygz7KZJbnjDY9iaQAyBOZjHeeOw2yKPN4kBAu4kZGuh/Ej7gvA94vCIPaOEF4UhVrUfMUFpEce1IiKS8sKMCoVYXTsgsii5k3VLDvdKRFo9IJM8s1aWwzII6pW/0urgKSiSIgHq2Wl+ZmECOjE+nQH6MQ/3RJXXtu0GibVZh8bz3prADl60c9ym/j0VAiOH+4P1FMsY7p2QyrM54LBUsLPBbnJOYO6zjinBvYBwTyLLVbiQ+Pjl7W05dTZN4xny2N5XKa081itv58GywjLGoBPnGKy9LbV0Ndvi+p6xaGIjLsFw3lEaZu25Z7GKxmYUy4zH74fczM3yOJEj6dX7PQlX2V2bQ8vj7feWbD76e61djfI53v36XGnelXs/GBHup78HFyOJdqTFXoh9eK86VZJeBpbGWha7HWsPdwGA5czbusfvhtxQuC+GQiSN/LkLIb2Auw4ULxyQ1Oyavf/OgyuJxOmHXh/v9cWjrXQGSeQ2M3I4//uG/zts2MYzcjg0HR478UF566UfSACUHrz0b1R6Hn/m29PV3KeumDz98A3lONjl//jOlCmGR/vnnv6MyMoxzMsfjPdKLv3lHP3kT6qAzigjgeGwkTngus0UWCvnO7IePp6bwdxgbT9hon2W3322xmH38/b4n8dDVeVEREeXlldLSvBvvm8rn/wbwXBI6JHxIWJDIuH7jtLLTMmyueAytuLZs2SW1tZvusuKi1VhJCf5GQ+GSSEBlDGKDjQoRqkXYH23FSOhQoULsSCQysJ45ImxtbXukAgoQ3ZZGgOTHO2+/K//p//5bKVe3bo18evz0gidStf3FxQ4ZD0VF214tCJF+UiOgEdAIaAQ0AhqBZSDA93AMP5/IyP/g6XS+oP23Fe4Yq9n4WYY1oldefVEGBlDT000joBFYNgItzQ3LOienqmygF+HeA1elttIHz+a2tHoCGQeCNIXINKwZUBinSoQt5U2TFeobuBD4YXNFIqRvalRODXVJbcqnCIGhzojkjQVwUGB+J2fIGZKu6R5xTZZJXyDt/W9Cvz53hSqYWqDwOH/8U+k885FSdwzCRonZDo2796OAD5sqWDAEQVLM4INrPkgTel1PWFCoRq6G27tDZkvGJc8cQLg4fKdRwOauR5VHgWbC7v7JZKEiJDyWMmhO+tXzbAx057++3jQZokLHK0DU4LVwgr84UwjbTO/yZ16Kr3EzwkS3SkfPhFwY/Ej1kR9HwDmIg2keP9YvQyBeHI3lsGzCJUDGSUUVbIkGkGECEih/DOHcXqgKQGYMQU3CFnWMKXsqkh8kNWhMQfKDJEir/SDWA3IJ9lpxQMrXM0vc1WWlUldZJ+O9tIUoBv7w2naMSLI/Buuucgniw31wLAo1DkgaixWqhQlpcOD6DkSlZ6QXOIVktnAKio90IDzzLUh+sNGCaqbjA3GW78YcvWL2o4gO6BgEvgdB92OwFKPVmMOKbI+wCZZQIFRmMVcQY1XgQiZQtHGCfAGEKIDXKSssChLjYQQj4wNzvrVIEtZ+dZ1KCipxLMib5LRS8oTLm8QJTNymIoG9o1JbdBeMS8eXg1LWXC8z+MQ9lT8sxbDjwCWT7uF+ccMWhLkh880Ocs4OdUfQoazAQufPqWDgbdtewL0SlngMtmZzZEiYt+ScM1MVrDeoliFJxNZcs0t97UUWSEfnTWDsRgFlGrthPWmCaH7A1XsQwxuAzr6QHD0HYm+uGV6YX99bRx7tkTTa5u3Zs1eqqqrkT+/9Sc6df1cFy7KolU2EGLt+ObHsQl729wtN3jjmfl9JrvD1XL9mjmPMz+j/UX/PcVkT5dwNjDK/N+aaidWjnmP2eAvNJfv6LnQ9MnE31m3oAFYD/3QtOl2QNuaffQ9lz9l4nc8vBxfjmmZil90Hi9oMar4JW8Nr107KBGyAWjd7VQG9ubl51TOIMrFYicd3kF+J3la/jwCsny5dOqXsmahgYKOlVBUtHLEBIz//7rdrJDdIVPR0X5cL547KJP7OjozALgubAl559cfzORyLrYzvf86c+VTOn/t0frwabCIpLsZ7GKXiyOnt4Xz3JpNFWZkyN+RBGu9xhrz3wbKT6g/mlzAjJDMYnv3y54bzo8Lj2tXLMgbVDM/LJEBoGcag8sznjDkVFqbnNxVJb3Dg8yVY82YoRUgm0QZraKh7XpmYSMSV/RUzTmjJtXnzbmVDptvSCIQQev7Znz/Fxh4osVu8cu1mn1y
TO+8/MnvoHhmX3oGY/HffeQI2nXdUS0uPoo/QCGgENAIaAY2ARkAjcC8CKXw2utQ5Kh992SdTsTsOGMUem7y0v05aauY2dt976iN5hp9lKioq5W/+5m8eyXh6EI3A44qAGXEcubYlP+FyZ2K4cwAF5GmVc1AUmE73jZ39tDZ6cvdmFU49eO0acsbzFHnAsG+28k2blZLBlGqRYOenUlt6J3uAr/cEJsUSHUd2NVQkLpASMwXiBjEgHuRo4MtAX0DMnTFkdEBREA5hlx6K44UpZYvEfAU77IqCsFzquHBZxhMpkB9xfGiuEW9pCpkUJqUgmIWaoG/4plRhvKJK5HxEA4qkccNmyeXA7kCMTXUBZoEj8I+p30NR2RQphKXSXjAjAbl1Lr0zctZXICWcn9essk6oYkhBoZKPIPEtNduULUM/PpAPXb+glAvltYdks2ev9MA6Yaj3AqzB/Gp+lvpNUKQMQo1AnMpQaE8Hw09BHTManpESM0gSkDmCTZW0VypO+MXqHlUZHabRaQnNJKBmaMbORjPsryblS3ygbHSHlPVWGJhQthIaTudZkKCwuaG0gCXVSCosoUtDsPyyiTkyIoUU50i1uEfGZKCT13VI6qdnJFg0LoFAOqtjR9MTKhw+5ptV+SUN3irpRYiUHyqZ8OCgVEGNE5uMSqEnJrF8EloIbodapc7RLbO4tsWwzUrNwqoMYfNhKEWc1WbZJLCKQmYHVTEkp9hIjkh5Uop8NRINjYkzRdszqH9AbHn9VeKE5ZjJYleWXZUoPpQWFii1CPugdVYxurHtqJVEV0wan/DDtapGKS/yYa9WYC+FTZdLynF/srF/u8UhSX7fB7JsNiZFO7eIN458Fax7aPC2DI5cU6Hvk8GbEgNvZEGOB1UyZhBZJc4ysZlwXyObIjhXQxkcBSmF+yHP7xQPrKacMyBocD9M4DqzPZd8Tn1dzWbFfdWKDIAXY/DBBKnDZsM91AxlDV97lI1FLBIg3/nudxRZePLkKRBQnygihIWrqqomtcuWnvdGIS67APgo56vHSiPAoGE2fS1W944wrgNnwWBoBjQzAJq2clR8VFUWy4tHnpFdu3eBjPXlrPowvGj587nW2+OkAMmHFJ8qhO98599BvRNQfzuoRLiOwO/f/fb/VaTAa9/8q7vCzmnBxLDxr3/9e/LrX/0n+fTYW0r5QMKLFlZUMCzWSKTx9+u3ECL+9NPfUONRTcJckXff/gUCxG/fFTC+WD/G8ww1pwUW7SkZKv6gjedOQr1EAoh9kvhZqFFlYoS1LzbmQhZV/FvCsHUSJJmNYzU2bhN/RYN0gujg3yFi64LimfZX10BKRfFz1rplnzRDrUiyR7elETDh/UURNsz88KWtsnvXNrxfXFxl+v7xS/Le55dUJp9uGgGNgEZAI6AR0AhoBL4qAsmZWRkYjcixi8wHjqruuPmzttwlB9roHrL6jZ+5CmHfrptGQCPwaBBYkgBh7sbXXv8xiizIdEDYdhyFY0s8XfV12WcQ3F0g1nAcBcsasSZRknYWSyzZLfnucikzlc2voqKyUiLc6Y9miUBdYNSDw2MI6wyrIPFCD/IlsMNuHAV8nj9Uj939UZMUI1jagdB1iUzgA6ofVgVN+BBrkrHuK+o8a02TGtvsrVDHxRNJ/HJLL42PA5NVqj8HitcRS/38nDiP0TJkYGDHInMjrAhNJ3vEHX8IElHrZdtc24NdlUwRQbHJGhZrzCkxE743Dap+2U+JzwdmOSZTTVBaDAyhuF4G1QrUCwmbVJaDlNh/QFwgIrj7nY3WAJMTKJAjtJt2VWyBnrQNFx+bUuXi9EZU/6mxPIlPI8wba6f91x68Pp2H8adGxDWdnhfPKasqFJMrbWuVnEkXKoeTaTLKhTwVctzBbekQeZt/m/iwXjbbBMbZE1NrJAYOXGsn1sBWv2ULbDRqxAxcGRQ+kxiSAmRqVJch3aNhGAUXaFF27JfAjA+vpYvqBTVmKWyok6KpbihUisSDPsLjdmmC6iRkSqsvKgsrpQAfjtmmsXuT15ZtuHAaeDaJB1gZz4kDgeMIl+8FpjGoZ9g2oZBRl+cTY31l0+m1tJZvVa9H8ngt0/cA7wUHrF9MCFuNAcfb4W7xzPrEbkXQfCPIGayLzedDsQOEGo/nfcPWUAaSDb7fxCZmwrxMsMLitZ9rcUdM3Ve8Z3jvGPc4++c15uu8fmvhDxuzPg5tr5ZdLXf/wWc4Ol9bjUZLrH379kl7ezuKTNfl3NmzcqPjFlQhF2DrAqIPtlgkRFjMssGGTbfVQ4DB9cwNYHPBh1+31UPAuBaBQD92/Q+qv1+lZVapr/dCvfYSbHq2KiXbcto0fjcOgtS2oq/iUlgEPuRMoOXMbaFj1z5Fs9CsF36OhAQVHUaYNwPJh5BBduL4Oyrwm4HgziL8LUUQeFlp5XwnzOhg4Z5WWSQgWNivr29RioZstVD2yLTRev75b6unk8kZYfg482LeeOPvVMA41RwMGK+CevR+fWWSEbTCoiXWSjSD4FisL85PkS54/2D8Xlrs2KWep/qkqrpJ2VuRAKEtmGGDNQBiiMocYktbrZJibFpJy7+W6nbDv57Ee0Iv3l/bCmzSVmmHTZtZpufUTcZj42tVqSY+NvwNowHQCGgENAIaAY3ACiPgczlka51Pov50bYfdV2NjrMOmN7OsMNS6O43AukBgSQKERZDtTz2vFsMdlyw6ZO68LIO33iY8R/byzu7RfXct3jg+u2BhPD+LPoyWh354HPu63y5Uvp6HoqnRZ+bO1ex5zqba0nYj6JdB0Gz5+ADLsTj2QuMY4/Prpp377lqfMUdjHDWX+Xmnx8qcf/Zjjm8cz8fsh/PIazus5qYeZ+zAzcb8Do5uZMwT/XTLXLdxTIu0zvfPozKv0/yJeNA8N56Bo3FsZt9wmkJDgPr8iZvm74U7zxn3Rx2Oegrzu3+7c+XT91ZL1uGZr/v5mj+9Xj6fvvc2z2OViVP2PWT0w+fr8F9mv8b9R2wMDI1p8PvM1w18Mq9P5pSNY43Xs+eRtbxH+q3yu4SHWDbZYSpgGO3qBoGRGNy1ayfsRVpQ0B3BTvYhGegfECprIpP92H07K0H6mOm2KgjEYDV37Vof1FE9ylZmz55msTu1TcmqXAwMarfRghIEdb1N9u/bL2XlZeIvLwVJXgnS8MEKiVR78ueOP4skQNZ6y/wdvtbner/50bqMjUV1w8qOhC8JEcvh16HG6EI2xd/JmdOfyaGD35Tyssr540haMPdjbHQU180hk7A9O3/hC9mGAPMmhHsvlOGx0Hg8jsTKnr0vQAXZr0gAqkF6e6+rgPGF+jHWxHErK6G4hCpiaLBDKZGYV7KQAiMbBxIXM9isQVUGra4ym9oYcZ9G5cdylRgca6FG3L2eYqlv2Kpsrrq7LigbrAp/tQqjHxzoVBtxaNOYSzD8QmPo59IIkPAwmvE48zmNk0ZAI6AR0AhoBDQCGoGVQIBB59ubfFLu23ZXd6xvliNzWDeNgEZg4yGwJAGSCYlRVDe+8rXMQvBCRII6ZhFc5/vLKPYbhy
7W12KvZx6fPc/MYnT2rtbFCtlGf5n9Zs/JGGehYxZ6jnPP7sPA5y7CYwE8FsMwE9rsdRuvqefnCJrF5mAcu9D8jDlmjnVX31kv5DLX7PMXK2Yt1hefX+jaZR6/0D2RPa5aWxbZlLmc7HEWw2e+3wWu3VLnZMH30L6l9yV9MLsG00osY6B6v0vaGqCysq6OCiRzwSy+1tXVQT2FXcd4YRIe4lTlTCO7hc1QNj00kHTHiyIwDsUaVWtUG1AtZajsFj1Bv/DQEDBBAUcFHRWBKxVsTrVna2urmjM3CKz1tvZnuDiCRl4LrcvYKvy1UoR8r8zGS2BDqHgprDON0O5QmDle6RaDDSgVG0eP/kblfuw78KLKA2EIOgmJTLWIMR5VDYkEbEVLKlWYeLaSgdkWPl+lUjuQVBkZSSsu75pY1jc8p7KyCX0i222oXy5e/Fza2kHKZShVFjqf8//88/dAuAzgvtuL/KftSo3rQr6WBUVyvs6ckuxGsoIqE4Mgcbm94lDWn0u3++WakEzZvHmXyh65cuWkssEqL69DJstZZX9VXdOmbMpoV6abRkAjoBHQCGgENAIaAY3A2kaANZgi+72bPznr9fBZZ22jq2enEVifCCyLAFmfS9SzJgJrvVi01uf3ONxF4WhCjiMD5r1TXbChSO+ELbSY5IlN5VLnd68JAsTA2bgfuJv9QXe0Pw7XbK2sIVsZtVbmpeexcghwc8B6+llbjDRfOUQeXk/T01Mo/v9R2U1RUfWDH/7P8sTuZ1D4L5xXd3D0RDKpwuwZCp7ZqNa5desybALfVETFC1/7vrz40o/kAtQgzAP5+MPfSEN9mxw+/BrIEYeyaLx0+aT80y//o8qMYUj6Sy/+8B6VBhUSzCBhwZ9KiEJLWml0PySo9CBxwHwM5pCcPfuRCgo3xjZULUYfJHZo8XX23Gfyu9/9TIWLv/zKX6pMEhI+DH0n4UM1CQkfpbDNIOS49ghyORj4TqKEoe08L5e2mAKE53IMElFbtu5SBAgzWCoqzsiNG+cVIbR9276crMVymYc+RiOgEdAIaAQ0AhoBjYBG4OEikITzy3BwSq52Maf2TnM6zNJU6ZVi99Lvcx/uDHXvGgGNwKNGQBMgjxpxPZ5GYA0gQOLDaPY1oPxYA5DoKdwHAU1Q3gcc/dKqILCe78l0GHehTIwPynWoJmpqNim1QS2+GlZQ0WhErl8/I1cun1Gh4KWljVBnlCuChNZXH374K7l65QvYNrXJoUOvgfBohT2TW7q6rqvMEJIjVVVNUFfshmrBBDLDpsiPixdOKHVII85Lqy7S+VkkJUiq0EIrEg5LA14vLa3K6dqSODh46GVlHXXt6mVF7LDt3HVYWUuR2GFj1sjEZECt67dv/kzOn/tUGhqbFVljt9mVFVZ9QzuUFrWw3+qRc+eOYY67QMYw0yQ9lakphpKfVK+Xl1emz0UWSiq1sL1V5gLupwDhcbS3amnercgfBtCz0f6K4ehUqXB+umkENAIaAY2ARkAjoBHQCKx9BBiCfu7GiPzXD67KePjOZqIK2F/99LVtIEDS2bRrfyV6hhoBjcBKIaAJkJVCUvejEVjjCLgdhXJ4Z5XUld8JcbfAd73YYxW+pptGQCOgEVgvCKxnBQhVE80o7G/bcViG3vsnRVhMx2Oy54lnFekwHY8iS+OmHDv2jiIJaC+1c+d+pVCgNdSJE+/Jnz97S1lfvfC178mmTe1KzcG8joOHvqFUC2fOfAwCpE7cIDsYZM6Q7127D6p8D77GRtKidM6qinZXn336rnrN4XSqQPA6kCr3y/8w7hWn0yP79x+RwNigvPnGf1YkC8mdPXuPyLZtB8Tl9ikCZhwh6ddvnJYvTvwBuSY3FIHx9a//UGWPWK0O1V0jskt27nwOBMd/UceVFFeoTBOSO8wMocXWn/70S3Us+9/adkDNMRcC5H4KEPbHfpgDUle/TeFAOzGqYZqbtyuFSi5YqInpphHQCGgENAIaAY2ARkAjsCYQoPOF4X6xJiakJ6ER0AisGgKaAFk16PXAGoFHi0ChpUDaG4qltdZ318BrIQT90SKhR9MIaATWOwLrWQFC7MvLKuRl2FYxzPv45+/K+yBCPvn4TWW3xMZAcyo/SH7Q4mr/gVeUXdbFi1/IB+//WgLBMTly5Ieya9ezyIHxqHNYoG/buhekwvdghTWoSBIW9EuKvy/FUI8899z3JIxMpWNHf6fsqljkN8ZjoZ/KD47HPJHnn/++uF13/61QgyzQaB9VWlKBef5ACgst8vHHbymi5c03/h9F7mSOQTsvWlxt33FIkTcHD76i8kiMxnlybOaVkICgpdfJk59IIWyumPtBlUkUmVAHnnwZx31X4ZhrW0oBwn5Kistky5ZdCpvBwV4oYZoVicOsE6pQsi29ch1bH6cR0AhoBDQCGgGNgEZAI/DoEGCNo6naI99/rkXiidT8wEU2s1SX5pYf9+hmq0fSCGgEHgUCmgB5FCjrMTQCawCBGfhgjk3GJIIskMzmwJuAYpdVmAGgm0ZAI6ARWA8IrGcFCPGlYmP79qegtnDJltbtcuXqeeRe9EDlEFLw09qqrLxaFd9JclDdYagcmFPBf4cOflMRAJkZGUqNceAbKuy8r69H5XgwA4RZIE1QV/zFD/4Xqa9vUVZXmeOZzUUIAK9VWRe7n3hB2XEtR/HAOVRX1csrr/wVMkD2ynnkkVy5cgaqj5H5NRljNDa0yp49L0hT09Z55Ydxz3FM2nb96Ef/QeFy9uwJ1YfRqM6gGoZzpO0XcWSjzVdVdaPKE2HjYz6X3RobtylCyQVyp7SsJvtlZYPFucWmIzI2NgKrsBalMmFIuiY/7oFLP6ER0AhoBDQCGgGNgEZgTSLA2samKo80VrrvmZ8OQb8HEv2ERmBDIHDvp8MNsWy9SI3AxkOAxMcnZ2/LqatDdy1+e1OpvPZUo7id2gZr490VesUagfWJwHpXgBB1FvubN21XZMMhFNsnUOiPwwqLjcRISUmlUh6w+E6CgceTNGlv36eOYYE/m6TgcbS8+u53/706hs0o3vNYvvbNb/5Enj78uiIWGCjOZgFRUlpWpTI7jPHmTl/Wl6Iir5rjli27oeK4e02ZYyw0d2Ogu3B5+jsyMnxb2YIVOb3K0qsEweeFhfa7iB+eQ4KHviO5sQAAIABJREFUpAjbQv0bpNP98DMIGOajGO2r4LEs8B7hwQyXT+P0OPwkPULg9FAaAY2ARkAjoBHQCKwLBPheJwr7qzBqINwIarRCs0m4AdQKdwzdNAIagY2FgCZANtb11qvdwAjEEjPS2ReSo+f65lGwmNOqj6/vhVf8BsZGL10joBFYXwisdwWIgTYJC6ozaqodSkExOyczyFR1ZF4ZFuiNoPTFFAk8l8X+hSybjNfKkP1B26rMeazUHZCe491rWmw9i43J45kLQlxqqonLYkfeeT4XbFbqmKVns3aPmJ6ehr3XoJqg3+/HvWIBYaSJkLV7xfTMNAIaAY2ARkAjoBFYLgIpvHm81DkqH33ZJ1OxOw4YxR6bvLS/TlpqvMvtUh+vE
dAIrHMENAGyzi+gnr5GIFcErGZYi9R55MVYvUQT6TcBNrNZmms9wtd00whoBDQC6wWBx6lcm1ncz4UoyIUM4HVc6rhcxnrQ+2G5a1psnKXWkH1eLsev1DHZY6+X70mAvPXW23Ls2FGoidpl967d0tbeJqWlpWIp1HaY6+U66nlqBDQCGgGNgEZAI7A4AsmZWRkYjcj7p7skNHWHANmEXJADbeWLn6hf0QhoBB5bBDQB8theWr0wjcDdCFDqeWh7texqufsPPsPR+ZpuGgGNgEZgvSCQgyBgvSxFz1Mj8MgQ4M9NAhsgbt68Ib/59W/k3Xf+IA2NzVLTWCn7d++VA/sPSHNLs1gLC8Vmt4vd4dT5YI/s6uiBNAIaAY2ARkAjoBFYSQR8Lofsai6TYCgGy6t06dPntKH2YVnJYXRfGgGNwDpBQBMg6+RC6WlqBL4qAgUIAnM7LfeQHaaCPNFBYF8VXX2+RkAj8CgReJwUII8SNz3WxkYg++dmMjQh586dkksXTfL50eNSXf07qShH1orXJ7t375IDB56Surpa8Xq94nQWaausjX376NVrBDQCGgGNgEZg3SBgMeXL9iafVJXuuGvODEcvdlvXzTr0RDUCGoGVQ0ATICuHpe5JI7CmEaD35Y3ecekemJDp5Iyaa6GpQCpKHNLWUCIOq1aBrOkLqCenEdAIzCOgFSD6ZtAIrBwCyZmkjI0Nq3/n0K2pwCTvv/8RCJFfS2trk+zbt0/27NmryZCVg1z3pBHQCGgENAIaAY3AQ0SA+WZFdov6l93ydPZZNiT6e43AhkBAEyAb4jLrRWoERMLRhBw92ye///PNeTgKIQV9YlO51PndmgDRN4lGQCOwbhDI3sm+biauJ6oRWASBqakpCQaDkkwmFzliZZ4OBIISn44JSY/FWiYhcuniWUWG+LzFsvuJ7fNkSHVlhZSUlYnL5VqsG/28RkAjoBHQCGgENAIagVVBIJWalcFARG71hZT9p9GcDrM0VXq1CmRVrooeVCOwughoAmR18dejawQeGQKm/Hw1FkkPo9mQ/2HXyo9Hdg30QBoBjcDKIKAVICuDo+5lbSDAD+nXrl2XN9/4Z+nuG3yok5qeisrVq7dyHiOTDOnsvHEXGfL004dlc0uLNDbUKzJE22TlDKs+UCOgEdAIaAQ0AhqBh4hAPJmSczdG5O/fvSjj4en5ker9Lvnpa9tAgFQ8xNF11xoBjcBaREATIGvxqug5aQQeAgIMOj+8s0rqyp3zvVtggVXssYrbUfgQRtRdagQ0AhqBh4OAVoA8HFwfh17zFrg5ZtcBYzbQ3y//+vv3kcdxVl0Gk8kCNUh8xb9+lWucTYYwRL19R5t899vflde/+U1FgOimEdAIaAQ0AhoBjYBGQCOgEdAIaATWGgKaAFlrV0TPRyPwkBAohNqjvaFY/ctuDEjXTSOgEdAIrBcE1kE9e71AuWbnOQvWggTADKyaCpBJQUIgbyF2Y24FxvGh8KSMjvZLIh5Xr9gdLikpLhO73YV+Ctbsep1FRbJjd7s4iu71ql7JSUdCcRkbHZXevq4H6pb5IG6PD/kgtWq+ba2tKh/EjaB03TQCGgGNgEZAI6AR0AisBQRMBXnSVO2R7z/XIvFEan5KRdgUWl2qN2yshWuk56AReNQIaALkUSOux9MIrBICMzMpmQjHZSJyRwLKqVAZUuyyIvRUkyCrdGn0sBoBjcAyEVhgk/8ye9CHL4YAiYTp6SmZik6J3WYXq9Wx2KEP7flYDJ7Nty7LhQvHZGIyKC3Nu1FkfxZ5E15ZSM3BOY+MDsjZM0dxznG5ffumxGJTan4eT6k0NLbIjh1PS9vWvVJUtDYL9W1tW+Xf/w//I+Z999/olQY5GAjIL//pl/LP/9y1rK5dRW7g2KxIjx3t22T7tu2yZesWKSkpEbPZIgwb1U0joBHQCGgENAIaAY3AWkCAtY3mGq9sqvLcMx0dgn4PJPoJjcCGQEATIBviMutFagREIghB/+B0t5y6OiTR6XT4qa3QJNubSuW1pxrF7dQ2WPo+0QhoBNYHAloB8vCuE1UXZ899JufOHVOkwd49Lzwy5cTMzIwMDN6WM2c+lg/e/7VcuXISSo5p+dqRH0hb+35FgGQ3kh99/d3y9ts/k48//I1Eo1HxVzRIub9WpmNRGRnpkKtXvpAzpz+TV179sTz7zLfWHAlC8sDr9YHk8UkK68mH0oVfV7qx35HhIfn4k4+x6cF03yB0vm63O+4hPZpbmsXv9yvSg00THyt9lXR/GgGNgEZAI6AR0Ah8VQT4Lmo6PqNqIMnUHQUIc1G5AdQKdwzdNAIagY2FgCZANtb11qvdwAjEEjPS2ReS45cH5mWgFnNa9fH1vXXi3sDY6KVrBDQC6wsBvdf83utFd6jFauYLOUctdCyPo4UUyQ+SCSXFFfLE7mfuS4Bk971Qv/fO9t5nqPq4cuW0fIhxT518TxEZJD/i8QQUKbF7T5h7hmqVE8ffkXff/oV65uVX/lL27T0ipWU1kkhMS1fnZRT8fyfHP39X3n7r51Jd1STbtz913zUtOthDfMEgEvIlfXcbX1d6SJPZvGCXBuFBkqkYqg4qPfY9sVeFnBukR2FhoRi0jP4ZXBBG/aRGQCOgEdAIaAQ0AmsAAbpfnL81LG9/1iXRRGJ+RtWlLnlpf520QB2im0ZAI7CxENAEyMa63nq1GxgBq7lAWus8MhWrnX8TYEMhpLnWI3xNN42ARkAjsF4QWPm98etl5YJMjBkU9tOEALMx8vNNMjU1KYlkUswm7tq/k3Vh2FklEnH1utF4nNVqxy7+O8o/HsvjmJ/R3387TUBgHFpJMYfDbLbeRRoY88jsm/1SGVBYaL9vXkc22iRRRsdGFPlx7OjvpL6hTXbtOgQlyKdy/tyn2YfPf88587zzF76QSVhl7dv/ohw58pdSX9eixme/1VX16vje25eks/OSXLt2UjZtal/UTmvRwR6zFxhobpAe5eWVsvuJ7bJv3z5p3dwqzCOprqwQf2Ul7if7XSvXxMdjdiPo5WgENAIaAY2ARuAxRCA5MysDoxH58+U+CYRiYp7Lgaspm5ADbeWP4Yr1kjQCGoGlENAEyFII6dc1Ao8JAkV2izy7q0b2bvHD9iJdPmQ4WKHZpGSgumkENAIagfWCwEYtwrLgPzEZgELiAwmFAtLauhcKiZh8+unvpa+vW3bu3C/Pv/ADKSutVOoHBl1fvPi5ysUYHupVl5eZHqWlFbJt2wHZueuwFPvKFbFBJQWtr05+8b5cv5omFM6ePYHn41JRUa+OZb9sVGv0IGfj9JcfyJWr52VifFQ97/aUyJbW7SimvyC1NZuWlR/C+VosFnnha9+XQ4deE4+3TDo6r6p+79cmxkdkaLBHHcKsD7fbp2yZqEThP4anV1U3wvaqXCLhy8CpR6KwxlrITut+4zxOrxUXF0vzpi3zpAfzPBob6qWkrAy4uB6npeq1aAQ0AhoBjYBGQCOwQRHwuRyyq7lMgiBAjFbpK0LtI23juUFh0cvWCGxYBDQBsmEvvV74RkTAZILn
pfVusoPP0RdcN42ARkAjsF4Q2MgKkImJAJQSb0p31wXZA6unkZEBlXHBRmIjHofaA2TC9evn5V9+9bdy7swnYrPZxO5wQJ1RBLIgnYlBm6lnn/+uvPLKT5RKgqHn166elj9/9hZCxLuFWSDslxkaTZuegA3SLgwAi6xQUD7//I/yzju/gL3Upfm+OT775ngkTl7/9k9k546DOZMgJcVl8uprPxGHo0iRMmOBoZxuRzNIE6pZcmkWi1mRLCbTxiX9eS+88sqrcvjwM1JXV4vsEa84nUU6yyOXG0gfoxHQCGgENAIaAY3AukDAghrH7s0l0lRVNL/5kxPnBlCfy7Yu1qAnqRHQCKwsApoAWVk8dW8agTWLQDSelBMXB+RiV+CuOTZXueWpbZU6BH3NXjk9MY2ARiAbgY1O2VKBMTTUL5cvf6ZIjacPf0vq61ugdNgkbpdbguNjIEl+pXIvfN5i+d73/1q2bT8IOyM3QrBvK8XIB+//SuV8VFY2Ievj+2K32ZGNcVBi0xGVp0FLqR27nkEw97MgVipxTJkiVi5dPilvvPF3yk5q165n5cUXfwDLqq3KluvatTMqvJwh5mw+EBlNje1L5m1QqeF0etQ/WlexkcjJpXk8pVJWXi1mS6F0dlxXFl4+b6myv2KjRVdfbweImyGl+qisrFdrfdCsklzmtJaPsVptuBe2q40POsB8LV8pPTeNgEZAI6AR0AhoBB4UAb7H4cbP7M2fD9qfPk8joBFY/whoAmT9X0O9Ao1ATgiEowk5eRVBYCduzR9fiOyPp7ZWye7Wch2CnhOK+iCNgEZgLSCwkRUgBv4MB5+KRORbr/9Q2V65itwgGtKZIAOwhBodG1bkB1UeTx9+XcrL0vZVFf4aqCCscuvWZZWvcf3aWdm//4iyt2pv3yfT8ah8ceIPsImaUnZWzz7zLaWwoJXUyOiAssgi+dGAnA6qPBiSbgH5QEKBtldOh1uRDVSP0CKrwl+bU96GQXxwfbmKEnmO11MsTx54UW7d/FKN+dFHv0Z4ehx2XKWKsBka6lYh6GOjo9K6ZZ8igphnslEbCwIPK2B9o2Kq160R0AhoBDQCGgGNwNpCIJWalcFARK5mbf50OszSVOmVYvfGfS+4tq6Uno1G4NEhoAmQR4e1HkkjsKoImPLzxWY1SbnXLtH4jJqLzVIg9ixLrFWdpB5cI6AR0AjkgMBGV4AYELk9fmR5PC2lJRXzuRd8raSkUl5++d/KwadegjpjiyIJDMVDPv4WlJZVKdUEGy20IpGQ5JWJClQ3mk2FpFsV+UGCI5mcgXqkTy5dOqUO2bX7oLRt3auC1I2+mS9Cq6zqmjaoMX6rrLAOPf0d5G945/vN5QH7o1VVLo3jU6USjkzI22/9XKlXzpz+TMpBvEwj64MWXgb5QcKGJA0zT3TTCGgENAIaAY2ARkAjoBF4PBGIJ1Ny7saI/P27F2UoOIXc0/R7v3q/S3762jYQIBWP58L1qjQCGoFFEdAEyKLQ6Bc0Ao8XAgw6P7yzSmh5FUchi81iKpBij1XcjsLHa7F6NRoBjcBjjYBWgJAgMKsiP8kMKiEyLZ28CCPfu+cFSaWSCCyfkqHhAZmamkAI+KS6L8YRWj45EVSPaae1VGPf7Gt4uBeB54Mg0+2wxKqQRDKpMkGyW2VljbKkGkdAOUPKqTpZDulABUiuFlgMhjebLUp5QjssqlMGBzqh/gipaZH8YGM+SpHTq1QyumkEMhHo6U9bg3p9Pp2Jpm8NjYBGQCOgEdAIPEYIGBs/pxMz8yTIY7Q8vRSNgEZgGQjoT4HLAEsfqhFYzwgUQu3R3lCs/iVn0uVDhoCxFRTkr+el6blrBDQCGwwBrQBJX3BXkUcFepMwyCRAUqmUChG/dPGEXLz4ubLDmgDpQTKEjeQAiQHaaOXS2P/MTFICgQGJRqMqH+TYsXekq+v6gqffuHEeZEtYjRMK30uQLHhSxpOZa1nq2OnpKTl69Pcql4TkzDPPfhuqmAMqt4Str/emnDz5iRw7+juldvmLv/hrZfVF5YhuGxuBUDQup7E7tHtkXJ547jvS1rZV56Js7FtCr14joBHQCGgEHhMEWOfYXOeTn77SLiFYgVvM6XpHkdUi1aVFj8kq9TI0AhqB5SCgCZDloKWP1QisYwSmYXvVPxqW0Yl0AcxYisdpldpyl1hBkOimEdAIaATWAwJaASJKYUGbKLPp7rdyVESQ/Hj3nZ/LH979uQpLLy+vhFqkUSlGjDYV+eL/Z+894KO8znz/x9LMqPfeOxIqSPQuMHbAxiaOK7bj2Mna3mya72b/2/fu3t3Nvf5ns8kmN8lu4k2cjRNv4oJbMLbBNgZMBwMCBAhQQQj1Pqqjwj2/M7yjd0Yz0qjPq/c5n8980LzllO95xYzO7zzPb1JTDW8QCCdVItKiob7CZR1JSSlChEgnH5Ofy2tcnXDXAwRjrb5+VRi+vyUjPyB+PPbYt+3SXA0Ij5L09AJ66aV/kebsCQkpwiw+Q3qecNE3AYgfJRevUt76bUIY204RERH6BsKjZwJMgAkwASYwTwgYxAbP9PgQShOvm8IPRCm3CS803kg1TyaZh8EEJkiABZAJAuPLmYBWCXSLnQ8ff1ZN+0tqbEOAB0hBejR98XM5LIBodWK530xAhwT4DxfXkz44aKFjx/ZI8aO1rYXWb/iCNDJPSEwnf/8QeSPSYe3Y8TMhjrziuiIXZ5D+Cq+773mCCgvXu7jKehjiR0pqjthVP7EoQ3cjQDDWqsqLdK3qnOzTsqUbRbqtRLt0W4j0yMzMp4yMXLp48YT0MIGXCXxT1MbrYw6ET84rAoj8uHqtQYgfFbfEj0cpLo4FsXk1yTwYJsAEmAAT0DUBmKCbeyzS/2NgaFiyMApRBMJITLg/BbAPqq6fDx68PgmwAKLPeedR65DAoEiJ0trRT9UNnWQZsH4JQChoWJAv4RwXJsAEmIBWCHAEiOuZMgufj8tlZ2Tkx4LsJfSF+56mRYvWiFRZ1ig/iAuNTbWyAndTYOEeeGcEBYXbGo6Pz6ClIroCBuljFXfFDHUd7kaAWNNy1UqhJzwsgoJDwqVxu2OBR0hwsLXvSloupAmbiC+JY538XpsEFPHj05IR8SMpKZlTX2lzOrnXTIAJMAEmwAScEoAJ+tnyVnrtk4vUZxm0XRMfHkSP3LmA8lI56tMpOD7IBOYxARZA5vHk8tCYgJqAr9GbclJCqcUcZzvsZzRSVnIo4RwXJsAEmIBWCHAEiPOZQkqonp5u6jS3ywusJulJUvxQhIihoSEZAdHYUEOIoHC3eHkZREqrBIqIjKSammqqrS2XniKOXhqov6OzVdQ9IKIy/ETUSbDbQoNa+EB6L2dFfQ1EGQgeiP6AN0mvGDvM2p0JG339I2bvk0nL5awvfExbBFj80NZ8cW+ZABNgAkyACUyFQGtnN12qbqVWc5+sxujtTe1d/WJTaOpUquV7mQAT0CgBFkA0OnHcbSYwUQJB/ibauDiJli+MtbvVxyh29YpzXJgAE2ACWiHAESCuZ0otHPT39Qoj8n55sWK
UDn+Qkyc/lh4eShkYsP5hiPeKOACvD6XgXogKUdFJlJiUR5UVV+jihdNUXX2VcnKW2EWXKP4jFZWXaO2au6i4eJu4N8BWl6sf0E9EdCilu9tM6L/6fW/viIgB4QOiTHh4HPn5+ckokOrqMmG6vo4iwqNs9yEFQl19jRBsrtOApV/6kgQEui/KuOovH9cWARY/tDVf3FsmwASYABNgAlMlEB4cQDnJ4aMiQMJDeO1jqmz5fiagRQIsgGhx1rjPTGCSBAwGr1H5LnGMCxNgAkxASwQ8NQLE22Akg4+/QNkpF/MdoyNmmjE8LRB1ER+fJE3S4Y1x/vwR2Q8II+3tTXTqs4+FGfhBKRr4+weQ2dxAN2oqKDIynkJupYkyGoOkWFBbW0U1N6ooRKSW8vfzp8iIKClqXL50XPppfPDBf8shRUUnyH9R/5HD70v/EZRlyzbKf8crED8qqy7RubOHhGBjFWM6OtuoqalCpumquV5Ke/a8LPoXJqsKD4+n3LxVwu8jibIWLKaCwmL6aM8rdODAGxQeEUv5+aspICBIXqv0qeT0fpEGK4yKilaK+0bM4Mfr20TOQ2xRBCeDgzn9ROrha6efADw/OO3V9HPlGpkAE2ACTIAJeCIBH+F1uiQ7khYkrxLfyUfSfXsLD5CQgLHTt3rieLhPTIAJTJ0ACyBTZ8g1MAFNEOgVuS+Pna+j81Wtdv3NSgihNQVi4SuQvwhoYiK5k0yACZCnRoDAWDEiJIj6+stFGqoO8vUdP/JhstMJgcJZQcopmJMjQuNsyUH63cvfExEf+8jHx1emvULaqry8ZbQwdzG9v+tlGc3xzjsvUmtrHW3e/JiM8kDqrPPnjtHhQ+9Sc0ujFFRuv/0hykjPp6LFxXTX3U9KkWPPnt/TlStnKTllgewK6kdkCcSVjZsepMWLN4p2IQiNXSAWlZWdpt/+5l+pUwgfSkE6r0FxrvT8GdlPpcDbBH4fEEBiouNo06YHqLOjTQg7++R4U1ILRKRHHPVb+mx9wr3ri++jlavukWm5prsgSsYsUo+Zza2iTxxhMt18J1ufOvIja+VdtH37o8SeH5OlyfcxASbABJgAE9AGAWyW8hFpvr29vGhI5XeK9ybeAKqNSeReMoFpJsACyDQD5eqYgKcS6OodoBOXGmnXsXK7Lt5emExLcmIoxFM7zv1iAkyACTgQ8NQIEHQzOSVJmIWfElEV5WIhPN7mvTFdk4jIhlWrNlF0TCJlCyEABt/qglRVebnL6T5hfh4SGkkN9dUyggJRHaGhUbR+/VYhAmwVXiEdZOnvp9LSkzIKBNEeA4ODMspj44b7ZPopRGDgXpQBi9UvJCoyju7e+iTFxaXSocMfyPrLr34mr0EbhYs30KKCFbINCBSIShmvwMsjKiqeVqzaQv391ggQtO8jolkcC44nJKSIKJCYW236SDP2oMAwWrhwsYhMOS2jPtB3V31y5hHi2M5k3kMs6rfcoOwFKwjRQFzmloCj+PGlL32JxY+5nRJunQkwASbABJjArBDAZqn+AeFLJzw/1EWJAPEVESJcmAAT0BcBFkD0Nd88Wh0TMIjdDn6+BgpWhXz6iQ9+f19epNHxY8FDZwKaJOCpESCAmZKcLBbnw6mi4pxYkF8yrVEgEBMixML/gw9+Q6bYgnBgMJhGiSxI9bRu3VbKy18phZh+ixATTH4yugMCB6IyhsVuuO2P/qntPASFIOGNgXRZq1dvptS0XGpouCafD5xD2igv8TmCPkQLseL22++X0SAwVDd3WaM20EZCYgaFhUbIfrkjfqB+tAkRo6hwrdvPIzxAFCED98OLJCMjV0SsCPGj8bocMwqEEaTommif3O7IrQstIiKnqvICDQ/1UEF+HiEaiMvcEWDxY+7Yc8tMgAkwASbABOaaANJe3WjqouMX66m3f8RjLjrUT3qixkcGznUXuX0mwARmmQALILMMnJtjAnNFIMDPSMVFCYSUV+oSEerLeTDnalK4XSbABCZFYPyYgklVOy03RUXH0MrlhfTajv0iGuEULVq0xrZQPx0NYNHfYMCuNZ9RwodS/02hEEEUgFCBiA2lqAUJ1OPqPFJ3paVmU+qt1FbOhAylftRxEw3eKrgW753dM9b4rT4l1jGpg0ZUVduM3HFefRz1YjwwW0+I9xevFKdjHqv9qZzDeOGVcvnKKbp943JKSEqaSnV87zQQUDw/kPYKkR8pKSPPxDRUz1UwASbABJgAE2ACHkxgcOgmXaxqoZc+KCVzz4Ctp5mJoRQXGcACiAfPHXeNCcwUARZAZoos18sEPIwAjMAWpUdSflrEqJ4hFJQLE2ACTEArBDw5AuQ2r9toxcoVVHb5Mh0/sYdCw6KlkDBRQWCsuXBc/B/r2vHaHev8WOfUbTpe5/h+rP6pzynjcjW+8c6jrsm27W4fnV3X1FxHx47uEpEmvrRmzdpRacmc3cPHZoaAY+TH449aPT9mpjWulQkwASbABJgAE/BUAiaxYSgmPEBEQY+kwQoTvqcmIy+Deuqccb+YwEwS4N/8maTLdTMBDyLQbxmi2uYuau7osetVaKAvJccEE+fB9KDJ4q4wASYwJgFPjgBB34KDg+nuLZupre0VOrD/LaIN91NyUua0RoKMCYhPzgqBoaEhamltoE8PvE3DN1vooQceFinAkkSqME9+QmcFzZw04ih+sOfHnEwDN8oEmAATYAJMYM4JwOi8MCuKAv197friI6zzkqKC5rx/3AEmwARmnwALILPPnFtkAnNCoFuYoH/8WTXtL6mxa39FThx98XM5LIDMyaxwo0yACUyGgCdHgCjjSU3PoEcefohee30H7dnzMq1YvpkyM/PJ3z/4VgqryYyc7/EEAsPDN4VZew+Vl1+gkyc/JqPJTNsfuY9ycxey98ccTZCj+KFEfrAYNUcTws0yASbABJgAE5hDAvj8jxM+H7EiAsSxIFqbCxNgAvojwAKI/uacR6xTAoPC8La1o5+qGzrJMjAsKZiMXhQfEUA4x4UJMAEmoBUCWvizBX94ZWZlS/+BPXvep4MHd0pj9Jyc5dKU22AwktHAX8O08syhnwODgzQ4OEDt7U1UVnZamp4vWhRLmzc/RjkLc6X4MShMN4fENSg+Pj5aGp5m++pM/IAAyeKHZqeUO84EJkSgv7+fvCzdNNDTN6H7+GImoHUCRhHd4B0Qxp93TiYSm1WaOnrpWn2H3Vmkv0qLDaYQkQqLCxNgAvoiwH9562u+ebQ6JuBr9KaclFBqMY8Y4voZjZSVHEo4x4UJMAEmoBUCWogAAUsswCYL8+Xt2x+n7OwSES1wgj4+8VvysYRQSHCY8IqwD8vXCn+99nNgoI86Otuoy2wW0R6R9MQXN1NBQb4QtGJsiw893V0iMqSCwsPDKCkpmRclZvhhUYsfKUXrCZEfLH7MMHSungl4AAGIHpamC9SQXv16AAAgAElEQVRx4Dw1DQ0SXav2gF5xF5jAHBBISaZokX4zLDeX/GPi56ADntmkZXCYSsub6VfCBL29y+oB4ic8URER8vS9BVSUGeWZHedeMQEmMGMEWA
CZMbRcMRPwLAJB/ibauDiJVuXbfzGC+IFzXJgAE2ACWiGghQgQhaXiCbJm7VrKy8uluro6qq+rp6bmRurvs1D/kPVKr5t9NHybVRDhnz2Pg4/cJ+BP0dE5QtRKpqjISGFwH+Y0ysPI5pqz9l/J1WvCg6WkgiB+fPmpp1n8mDXy3BATmBsC2NXdXXuGrn5yxip64P9jbwPdJj5jgzjqbm4mhVudMwJmIQTeFN8rGw8dkq/ApUsodd1aMgaFz1mfPKlhbJJoaO2mVnMfGb1HNnxaBoRoyoUJMAHdEWABRHdTzgPWMwGDMANz3G+MY1yYABNgAloioJUIEDVTpEeKiIiQr6ysLIKBNsqgSKtkuJUKi3+2fi31JA5KX5R/vcUf0EajyWVkR2BgkJxfb5HijFMwzdz/Ko6RHyx+zBxrrpkJeAoBRH00H/5ULvRGpadT8KZN5OPnT/h/WflM9ZS+cj+YwGwQwPMvdmNQlEjD2dzeJn83zn92irKffVb30SAGb3iABNCdS1Kpp8+amhRzEhHqR6GBjisiszFb3AYTYAJzTYAFkLmeAW6fCcwSgV7LIB07X0fnq1rtWsxKCKE1BfGcB3OW5oGbYQJMYOoEtBQB4my07A3hjMr8OAbRg+d35ueSIz9mnjG3wAQ8iQD8lar37qEusbgbLaI9IsWir1JY/PCkmeK+zBUB/E5E3nMvNV28QGW/+AVlfvnzFJS4eK66M+ftYuPRkgUxlJcWOaovPpz+exQTPsAE9ECABRA9zDKPkQkIAl29A3TiUiPtOlZuZ4K+KjeOluTEUAhTYgJMgAlohIAWI0A0gpa7yQQ8nsDpS9c57ZXHzxJ3kAlML4GGA3tt4kdMRCRHfEwvXq5tHhEITk2Vo7n66z9Q/v9I0W06LPytMDh0U0RaD4+aXRZARiHhA0xAFwRYANHFNPMgmQCRwcuL/HwNFBzgY4cjIkiEznJhAkyACWiIgNYjQDSEmrvKBDyKwETED+wYHxocSXsx3kCQtgw7RrkwASbgWQR6Gmptaa9Y/PCsueHeeB4BpMVCSqymigpq2LmL4h99QpcpOYfEd4BL1a104MwN6u0b8fwID/GhO5YmU3o8b//0vKeXe8QEZpYACyAzy5drZwIeQyDAz0h3LEsipLxSCnY/hIs8mCEOoojHdJo7wgSYABNwQoAjQJxA4UNMwMMIVLd2UlVDBd25Ko9iTMYp9c7R8+OJJ54a0/C8paWFPtm7j86ePTluu+auPvIPjKQtnyumdevW6XKhaFxIfAETmCMCMD2/ceoz2ToWdTnd1RxNBDerOQIZwiOnfO9eMlw+QrE5azTX/6l2GNEf1+o6aOfhq9IEXSnpcSGUkxzJAshUAfP9TECDBFgA0eCkcZeZwGQI+Jq8aWFyuHw5FjZqdSTC75kAE/BkAhwB4smzw31jAtNPQO35AfEjMyvbpVAB8WPnH3bS0d27KSTUl1L7ol12qKSvmnYdPCrPR4T50RrhLeBF/D+MS2B8ggnMMoG+pjpb6qtZbpqbYwKaJiAN0kWpO1FGYWlLdelPZjJ4C8Nza/YLH5N16RPvfUyanlruPBNgApMkwALIJMHxbUxAawT6LENU29xFzR09dl2PDPGn+MhAgkDChQkwASagBQIcAaKFWeI+6pmAwWikoEBfSg6Io1C/qX2/UKe9moj4URyUT9sKN1Jg9IhZsjInXY1tJJJi0LHf/4TixGJIUGCgnqeLx84EPJZAZ1uV7Jva9NxjO8sdYwIeRiBaiPqXDxyga1WVlJWdoyt532QQJujC5zRCbITot5BN9DAZDZQWG+xhM8XdYQJMYDYIsAAyG5S5DSbgAQS6hQn6x59V0/6SGvElwJoHEzshClKj6Jlt+SyAeMAccReYABNwjwDvz3aPE1/FBOaKwODAAIWZTOTn7TelLkxW/Lg7Yjltyl/pVPxAhyB+/Gj/i7Jvt29cTQ0XWsXuWN8p9ZVvZgJMYPoJmCtbiFKSp79irpEJ6IBAkI81+qH9RiX1JiWTv79+vD+R4SImzF++lIINVPw3hA4efB4iE3BBgJ3+XIDhw0xgvhEYHB6m1o5+amjtpoa2HutL/NzTN0A4x4UJMAEmoBUCHAGilZnifjKByROYrPiByI+xxI+yxlopfjTWVtPm7BVUOJQ5+U7ynUyACcwYAfh/dDU3U5S3gby9pxZJNmOd5IqZgIcTCBW/O+11DdTW1ubhPZ357rH4MfOMuQUm4MkEOALEk2eH+8YEppFAoDBBX54TTX6+9r/2MEX3FWboXJgAE2ACWiHAf8BoZaa4n0xgcgQU8SMyewM98cSX3Pb8GC/yQy1+PJS9lbKjU6imvXFyneS7mAATmFECwzd5u8OMAubK5z0BxQcEA+3uMtPg0DAZvPWxBxr/ewyJ8SILhroYRGosH7H2oRcO8/4h5wEygQkQYAFkArD4UiagZQJ+It3VmkUJtDI/zm4Y3l5ehByZXJgAE2ACWiHASyJamSnuJxOYOAG1+PHVZ90XPyYS+aGIHxPvHd/BBJjAbBEYGrRfuJytdrkdJjBfCPT3jnh/dvf0EX6nDN7WtFjzZYyuxgHxo6K2g45frKfefmv6b1wbHepHyxfGSg9ULkyACeiLAAsg+ppvHi0ToMFBh3RX8n8BFkD40WACTEA7BDgCRDtzxT1lAhMhoIgf4ZnL6StffmzMyI/Ozk7a+YeddHT3bpKG50ucG56jfWeRHxPpF1/LBJjA7BMwGk2z3yi3yATmEQF1BMjAgHAC11EZHLpJF6ta6JW9ZdTZ3W8beXJMMMVFBrAAoqNngYfKBBQCLIDws8AEdELA3GOhzy410PmqVrsRIwXWmoJ4CgnUx24QnUw3D5MJzGsCHAEyr6eXB6cjAv2WkR3epVVNVHKxgiB+PP3005SzMJdgYuqstLS0TFj86KvtIY78cEaTjzEBzySgtwVbz5wF7pWWCagjQLQ8jsn2PcjPRHHh/uRnsqb79hEZMeIjAshk5GXQyTLl+5iAlgnwb76WZ4/7zgQmQKBvYIhOXGqktw5esd1lMnrRqtw4WpITQyETqIsvZQJMgAnMJQHnS6Jz2SNumwkwgfEIqMUOXOtjMspb+oUGcvFKM5k7blLK0hVS/MjNzXMpfiDyY8+e3TMS+aGn9CDjzRefZwJzTYAjQOZ6Brh9rRNQR4BofSwT7T9SfOekhtOTWwrsbvURgWVJUUETrY6vZwJMYB4QYAFkHkwiD4EJuEPAILw+wkN8KDMx1HY5dkMkRgUTznFhAkyACWiFAEeAaGWmuJ96JWAwGqn7NiPtOVJKDe0jqSec8bhWc4Pah7xo7S3xY7zID4gf+9/Z6XbaK3ciPzqpmdraWoVh6pCzLvIxJsAE5oAAR4DMAXRucl4R0HMECCJI4fMRHeZvN6det/E2qnn1kPNgmMAECLAAMgFYfCkT0DKBkEATbVmRKk2/LANWIzCEfwb7myjAz7oLU8vj474zASagHwL8p4t+5ppHqk0CgYGBVFy8gcLCwuUAjMPdNOAl0k6YvMhiGfEiw/EFKSso/3Yv2lC8cVzPj
4mKH4211W6nvbL0d2sTNveaCcxTAhwBMk8nloc1awT0HAEyPHyTkAK8oa2HBoQhulKM3l6UEBVIAb68/jFrDyI3xAQ8hAALIB4yEdwNJjDTBAziwz4pJkhEfATaNXWb2B3Bi4kzTZ/rZwJMYDoJcATIdNLkupjA9BPw8fGhNWvXUlFRoVuVGwwG8vX1GzPt1a5d70575IfSuWCKpNCwKPL2tuYJd6vTfBETYAIzSoAjQGYUL1euAwJ6jgCxDA5L/9O3Dl6ltq6RSFR4gDz+uYVUlBmlgyeAh8gEmICaAAsg/DwwAZ0QGBQ7H2qbu6nV3DcqAgThob63zMF0goOHyQSYgIYJsGir4cnjruuGADZeBAcHT3m88PyYqPgxkcgPpYM+Pr7kbeAdoVOeMK6ACUwTAY4AmSaQXI1uCeg5AgSTbu61UGW9SHLZ3iuMz0dSfivZMHT7YPDAmYBOCbAAotOJ52Hrj0BLZx/tPl5Fe05U2Q1+2YI4emZbPgsg+nskeMRMQLMEOAJEs1PHHWcCEyIwW+LHhDrFFzMBJjArBDgCZFYwcyPzmICeI0AM3rdRSlwIbV6WSr191vTfmGp4okaG2PuCzONHgIfGBJiAigALIPw4MAEdEWjt6Jd5MJXiY/Smnr4BGhweyYupIxw8VCbABDRKgCNANDpx3G0mMAECLH5MABZfygTmIQGOAJmHk8pDmlUCeo4Agdl5TnI4pcUG0+DQTYIgohT2P53Vx5AbYwIeQ4AFEI+ZCu4IE5hZAr5C7FieE01+vva/9lkJIYRzXJgAE2ACWiHgyREgLS0t1NHWRpHR0aPS//T391N9fT01NDRQZ0enMIO2aAU59/MWgcAg4aUVH0ex8fHk7887CGfqwWDxY6bIcr1MQDsEJhMB4o6Pz9DQkHYgeGBP1Yw9maVW+ulqitH/qfLVcwSIl/A5hegxZPAiYTNmK95eXgRxhAsTYAL6I8ACiP7mnEesUwJB/iYqXpxIK/Pj7AjgS4AP+3/o9KngYTMBbRLw5D9burq6qKKyinz8/GwCCISPK1eu0LlzZ8W5OmptbaWW5gFtwtd5r03C3Dsq2peSUxJo8aJcysnJGSV06RzRlIcP8WPP7g9nzPB8yh3kCpgAE5gVAhOJAMFCb0NHu9v98vf1k9dGhobRdCw0u92whi90xhgcwdCTilb66chMeQ6b29tEhoZeeRp8g8T3DkRyTOY51XMECPxPK2o76PjFeurtH0mBFRJoolW58ZQcE+Q4BfyeCTCBeU6ABZB5PsE8PCagELhN7IIYtHCqK34imAAT0D4BT44ASUhMoqioKDIJQ2UURIQc/PQgHTh4nMxmL0pLzaPCRcViET1B7EgzkvHWtrSBwUH5M//ruRwGBweovb2JbtRUUOnZMrpUWiLmspBu33Q7JSUlE3YbcpkaAUX8+OiNHVQclE/blmykwGjni2tljbX0o/0v0mQMz6fWS76bCTCB2SDgbgQIFobfOLCX/uGnvxa5/kdS/Y7Vx/TIMEqMjqDo+GRaWZRD6wqXUHJs/Fi36PocGO89c4q+9fyPbIwHLP20ZvUK+uVf/6XHiCBKP//yhy9Qa1uLbc6efmgr/d2TfySFBE8r6HNPl5mOVpbTx5/upysXLtOJilrJOTwsgjLSE+lzyxbRtnUbJvyM6jkCBGmvLla10Ct7y6QJulIyE0MpVaTFYgHE034TuD9MYOYJsAAy84y5BSbgEQQ6u/rps0sNdL6q1WYEhnRYKTGBtHFxEoUE+nhEP7kTTIAJMIHxCHjyMrPBW4Tai9RIw8M3qUGku/rDzrfpxMkbQvgooOLiVRQTHScWyg3i5UW3cQj+eFPtUecxXVGRcWIxIp86Olvp8uXTdPjop3SjroUefvAeSs9cINIteHlUn7XUmcmIH321PfRQ9lbKjk7R0lC5r0yACbhBYCIRIKiuubmR+vrdE0BaWhrpRBnuOkwvve1PBalJ9LUvP0YPFm/yyEVyN3DNyiWdnW3Uae6wtWVpaZ2VdifaSENDrV0/O9r7JlrFrF1feeM6/eCl39GOj/aKTTONdu3i/ZWrF+mDPR/SzzPfo795evuEnlFPFHxmDaxoKMjPRHHh/uSnynYRHxFAJiMvg87mPHBbTMBTCPBvvqfMBPeDCcwwgb6BITpxqZHeOniFBm7lvjWKHSfFhQm0Kj+eQma4fa6eCTABJjBdBDw5AkQZY1NjA+14400qOVtHK5Zvpvz8FWQy+dBNLXR+uiZqntWDuYNohd2aEeFRtHzZHRQeHkNHDr9Pr72+gx5/9FFKTc/gSJBJzPtkxA+O/JgEaL6FCWiIgLsRIFP1SRgctNDpq+X0p8//kC5VVXlspICnTZ3Bm5eSpjonSHf1jy/8jH73zvvjVgUh5C++/x/UZjbTM/fc55ZQp+cIEJPw/sjLiKRnAwqoX2W5FxRgkMboXJgAE9AfAf7U0t+c84h1SsAgdhuHh/gQwj7VJTEqmHCOCxNgAkxAKwQ8OQIEDOH5sfeTvXbih9HI4odWni93+gkxBEIIokGMRl86sP8tevsPf6CnnnqKIiIi3KmCr7lFYDLiB0d+8OPDBOY/AXcjQPB/saviapF+cGjEE0D5GZENP/71a+QfEER//cRTrqrk47cIqBkylMkRePWjD+jND/Y7vRnPriNjRIT88KU3KD83nzbkFji9T31QzxEgSEsaHeZPUSFWvx+FC9KCe/rfEeNOLF/ABJjApAiwADIpbHwTE9AeARh+3bM6jZYvjLXrfHiQr0h/ZdLegLjHTIAJ6JaAJwdRIPVVSUkJnfyslBZkLZvVyI+bYlWe02rN7q8FFt6SkzJpmYgGOXjoD9Lv5a677yIfYVrKZXwCED8+2fsJwfOj0DeZPT/GR8ZXMAHdEJhKBAgWj4uLCulbf/Ql8g8ZiXPv6bCmb6oQaYcOfnqEPj5TapcqCSm0fvnqTirMyKC7V6/TDevJDNSVuDSZuvR4T3V9Le3avc8ubVtwUAg9tvVuukukTMVze/7Cefr5a+/JNFhKqblRRW/u3E2r0jLGjQLRcwQIvo+beyzU0NYjsl+M+KD6mQwUI9JiBfga9fjY8ZiZgK4JsACi6+nnweuJAPKSx0cGyhcXJsAEmICWCXjyzq0uYWR58OBB8rotggqL1onogJmP/ED6D3hSNDfXymiEyIhoCgpybhyt5Xn31L7LSJCMXLpxo1ya3S9bvowSEhI8tbse06/JRH5w2iuPmT7uCBOYcQJTiQDBzvnYxGjaVLSE/AODnPYVaYQU83QsKisFP7++Z7fYNJbntrm3syiUqaTmmu76HAE41j+ZvjpGJzi2MZX3jv1T6ppMP531w7H+6arXWVuujpVWVtDJy1dtpyEo/cnj98sUbMozu6mgiNITkuiZf3jezh/k1JnT1NDRTmni2R6r73qOALEMDkv/09/vvUS9liEbZ3iAPP65hVSUGeVqavg4E2AC85QACyDzdGJ5WEzAkcCg2PlQ29xNrWZ7E7hgfxMlxwSxcasjMH7PBJiAxxLw5AiQ8vIKulHbQunpGygsNHLGGcKY
# # I was working in Google Colab using the Kaggle API

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from sklearn.metrics import f1_score, roc_auc_score
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import random_split, DataLoader, Dataset
import torchvision.transforms as transforms
from torchvision.io import read_image, ImageReadMode
import torchvision
from torchvision.datasets.vision import VisionDataset
from tqdm.notebook import tqdm
from PIL import Image
from glob import glob
import os
import time
import copy
import csv

from google.colab import files

uploaded = files.upload()
for fn in uploaded.keys():
    print(
        'User uploaded file "{name}" with length {length} bytes'.format(
            name=fn, length=len(uploaded[fn])
        )
    )
# Then move kaggle.json into the folder where the API expects to find it.

image_path = "/content/dataset/semantic_drone_dataset/original_images"
mask_path = "/content/dataset/semantic_drone_dataset/label_images_semantic"

labels = pd.read_csv("/content/class_dict_seg.csv")
labels.head()

len(labels)

classes = labels.name.values.tolist()
print(classes)

length = len(os.listdir(image_path))

# ## Dataset class


class DroneDataset(Dataset):
    def __init__(self, imgs_dir, masks_dir, count, is_val=False):
        self.imgs_dir = imgs_dir
        self.masks_dir = masks_dir
        imgs_paths = os.listdir(self.imgs_dir)
        imgs_paths.sort()
        mask_paths = os.listdir(self.masks_dir)
        mask_paths.sort()
        self.is_val = is_val
        if not is_val:  # for splitting into train/val on the fly
            self.imgs_paths = imgs_paths[:count]
            self.mask_paths = mask_paths[:count]
        else:
            self.imgs_paths = imgs_paths[-count:]
            self.mask_paths = mask_paths[-count:]

    def __len__(self):
        return len(self.imgs_paths)

    def __getitem__(self, idx):
        img = read_image(
            os.path.join(self.imgs_dir, self.imgs_paths[idx]), ImageReadMode.RGB
        )
        mask = read_image(
            os.path.join(self.masks_dir, self.mask_paths[idx]), ImageReadMode.GRAY
        )
        return img, mask


# # Transforms

torchvision.models.segmentation.DeepLabV3_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1.transforms()
# transforms with which the model was trained


def img_transform(img, mask, is_val=False, size=520):
    img = img.to(device)
    mask = mask.to(device)
    img = img.float() / 255.0
    if not is_val:
        trans_img = torch.nn.Sequential(
            transforms.Resize([size, size]),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            transforms.RandomAutocontrast(p=0.2),
        )
    else:
        trans_img = torch.nn.Sequential(
            transforms.Resize([size, size]),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        )
    trans_mask = torch.nn.Sequential(transforms.Resize([size, size]))
    trans_img.requires_grad_(False)
    trans_mask.requires_grad_(False)
    trans_img = trans_img.to(device)
    trans_mask = trans_mask.to(device)
    img = trans_img(img)
    mask = trans_mask(mask)
    return img, mask.squeeze(1).long()


train_dataset_len = int(length * 0.7)
val_dataset_len = length - train_dataset_len
train_dataset = DroneDataset(image_path, mask_path, train_dataset_len)
val_dataset = DroneDataset(image_path, mask_path, val_dataset_len, is_val=True)

train_dataset[5][0].shape

train_dataset[5][1].shape

img, mask = next(iter(train_dataset))

batch_size = 4
train_loader = DataLoader(train_dataset, batch_size, shuffle=True, num_workers=2)
val_loader = DataLoader(val_dataset, batch_size, shuffle=False, num_workers=2)

model = torchvision.models.segmentation.deeplabv3_resnet50(
    weights=torchvision.models.segmentation.DeepLabV3_ResNet50_Weights.DEFAULT,
    progress=True,
)

# # By default deeplabv3 has 21 output classes, so you need to change the head for custom data

from torchvision.models.segmentation.deeplabv3 import DeepLabHead
from torchvision.models.segmentation.fcn import FCNHead

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.classifier = DeepLabHead(2048, 23)
model.aux_classifier = FCNHead(1024, 23)
model = model.to(device)

# # I use Cross Entropy loss; you can also try different ones, for example, Dice loss. You can look at implementations here:
# ## https://www.kaggle.com/code/bigironsphere/loss-function-library-keras-pytorch

from torch.nn import CrossEntropyLoss
import torch.nn.functional as F

loss = CrossEntropyLoss().to(device)
learning_rate = 0.01
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
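# # The Dice loss mentioned above is not implemented in this notebook; below is a minimal
# # sketch of my own of a multiclass Dice loss (not taken from the loss-function library
# # linked above) that could be swapped in for CrossEntropyLoss. It assumes logits of shape
# # (N, C, H, W) and integer masks of shape (N, H, W), matching the tensors produced by
# # img_transform and used in the training loop below.


class DiceLoss(nn.Module):
    def __init__(self, smooth=1.0):
        super().__init__()
        self.smooth = smooth  # avoids division by zero on classes absent from the batch

    def forward(self, logits, targets):
        num_classes = logits.shape[1]
        probs = F.softmax(logits, dim=1)
        # One-hot encode the integer masks: (N, H, W) -> (N, C, H, W)
        targets_onehot = F.one_hot(targets, num_classes).permute(0, 3, 1, 2).float()
        dims = (0, 2, 3)  # sum over batch and spatial dims, keep per-class scores
        intersection = torch.sum(probs * targets_onehot, dims)
        cardinality = torch.sum(probs + targets_onehot, dims)
        dice_per_class = (2.0 * intersection + self.smooth) / (cardinality + self.smooth)
        return 1.0 - dice_per_class.mean()


# Usage would mirror the CrossEntropyLoss line above, e.g.:
# loss = DiceLoss().to(device)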
# ## Pixel accuracy


def pixel_accuracy(mask, output):
    output_softmax = F.softmax(output, dim=1)
    output_argmax = torch.argmax(output_softmax, dim=1)
    bool_tensor = (torch.flatten(mask)) == (torch.flatten(output_argmax))
    return torch.sum(bool_tensor) / torch.numel(bool_tensor)


# # Train

from tqdm import tqdm

epoch_count = 30
train_losses = []
val_losses = []
train_accs = []
val_accs = []
es_steps = 3  # early-stopping patience: epochs without val-loss improvement
count_steps = 0
train_len = len(train_loader)
val_len = len(val_loader)
print(train_len)
print(val_len)
best_score = 1e10
for epoch in range(epoch_count):
    if count_steps >= es_steps:
        print("Early stopping!")
        break
    train_loss_sum = 0
    train_pixel_acc = 0
    model.train()
    for img_batch, mask_batch in tqdm(train_loader):
        img_batch = img_batch.to(device, non_blocking=True)
        mask_batch = mask_batch.to(device, non_blocking=True)
        img_batch, mask_batch = img_transform(img_batch, mask_batch, is_val=False)
        optimizer.zero_grad()
        output_batch = model(img_batch)
        loss_value = loss(output_batch["out"], mask_batch)
        train_pixel_acc += pixel_accuracy(mask_batch, output_batch["out"]).detach()
        train_loss_sum += loss_value.detach()
        loss_value.backward()
        optimizer.step()
        del output_batch
    train_loss = train_loss_sum / train_len
    train_acc = train_pixel_acc / train_len
    train_losses.append(train_loss)
    train_accs.append(train_acc)
    print(
        f"Epoch {epoch} / {epoch_count} | train loss = {train_loss} | train acc = {train_acc}"
    )
    model.eval()
    val_loss_sum = 0
    val_pixel_acc = 0
    with torch.no_grad():  # no gradients needed for validation
        for img_batch, mask_batch in tqdm(val_loader):
            img_batch = img_batch.to(device, non_blocking=True)
            mask_batch = mask_batch.to(device, non_blocking=True)
            img_batch, mask_batch = img_transform(img_batch, mask_batch, is_val=True)
            output_batch = model(img_batch)
            loss_value = loss(output_batch["out"], mask_batch)
            val_loss_sum = val_loss_sum + loss_value.detach()
            val_pixel_acc = (
                val_pixel_acc + pixel_accuracy(mask_batch, output_batch["out"]).detach()
            )
            del output_batch
    val_loss = val_loss_sum / val_len
    val_acc = val_pixel_acc / val_len
    val_losses.append(val_loss)
    val_accs.append(val_acc)
    print(
        f"Epoch {epoch} / {epoch_count} | val loss = {val_loss} | val acc = {val_acc}"
    )
    if val_loss < best_score:
        best_score = val_loss
        count_steps = 0
        torch.save(model, "best_model.pt")  # checkpoint the best model so far
    else:
        count_steps += 1

import matplotlib.pyplot as plt

train_losses = [x.cpu().item() for x in train_losses]
val_losses = [x.cpu().item() for x in val_losses]
plt.plot(train_losses, linestyle="-")
plt.plot(val_losses, linestyle="--")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.show()

train_accs = [x.cpu().item() for x in train_accs]
val_accs = [x.cpu().item() for x in val_accs]
plt.plot(train_accs, linestyle="-")
plt.plot(val_accs, linestyle="--")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.show()

# # Inference

model.eval()

label_map = np.array(
    [
        (0, 0, 0),  # unlabeled
        (128, 64, 128),  # paved-area
        (130, 76, 0),  # dirt
        (0, 102, 0),  # grass
        (112, 103, 87),  # gravel
        (28, 42, 168),  # water
        (48, 41, 30),  # rocks
        (0, 50, 89),  # pool
        (107, 142, 35),  # vegetation
        (70, 70, 70),  # roof
        (102, 102, 156),  # wall
        (254, 228, 12),  # window
        (254, 148, 12),  # door
        (190, 153, 153),  # fence
        (153, 153, 153),  # fence-pole
        (255, 22, 96),  # person
        (102, 51, 0),  # dog
        (9, 143, 150),  # car
        (119, 11, 32),  # bicycle
        (51, 51, 0),  # tree
        (190, 250, 190),  # bald-tree
        (112, 150, 146),  # art-marker
        (2, 135, 115),  # obstacle
        (255, 0, 0),  # conflicting
    ]
)


def draw_segmentation_map(outputs):
    labels = torch.argmax(outputs.squeeze(), dim=0).numpy()
    # Create 3 Numpy arrays containing zeros.
    # Later each pixel will be filled with respective red, green, and blue pixels
    # depending on the predicted class.
    red_map = np.zeros_like(labels).astype(np.uint8)
    green_map = np.zeros_like(labels).astype(np.uint8)
    blue_map = np.zeros_like(labels).astype(np.uint8)
    for label_num in range(0, len(label_map)):
        index = labels == label_num
        R, G, B = label_map[label_num]
        red_map[index] = R
        green_map[index] = G
        blue_map[index] = B
    segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)
    return segmentation_map


def image_overlay(image, segmented_image):
    alpha = 1  # transparency for the original image
    beta = 0.8  # transparency for the segmentation map
    gamma = 0  # scalar added to each sum
    image = np.array(image)
    segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    cv2.addWeighted(image, alpha, segmented_image, beta, gamma, image)
    return image


imgs_paths = os.listdir(image_path)
imgs_paths.sort()


def perform_inference(
    model=model,
    imgs_paths=imgs_paths,
    num_images=10,
    image_dir="/content/dataset/semantic_drone_dataset/original_images/",
    device="cpu",
):
    device = (
        device
        if device is not None
        else ("cuda" if torch.cuda.is_available() else "cpu")
    )
    model.to(device)
    preprocess = transforms.Compose(
        [
            transforms.Resize([520, 520]),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    # Load image handles for the validation set.
    # Randomly select 'num_images' from the whole set for inference.
    selected_images = np.random.choice(imgs_paths, num_images, replace=False)
    # Iterate over selected images
    for img_name in selected_images:
        # Load and pre-process image.
        image_path = os.path.join(image_dir, img_name)
        img_raw = Image.open(image_path).convert("RGB")
        W, H = img_raw.size[:2]
        img_t = preprocess(img_raw)
        img_t = torch.unsqueeze(img_t, dim=0).to(device)
        # Model Inference
        with torch.no_grad():
            output = model(img_t)["out"].cpu()
        # Get RGB segmentation map
        segmented_image = draw_segmentation_map(output)
        # Resize to original image size
        segmented_image = cv2.resize(
            segmented_image, (W, H), interpolation=cv2.INTER_LINEAR
        )
        overlayed_image = image_overlay(img_raw, segmented_image)
        # Plot
        plt.figure(figsize=(12, 10), dpi=100)
        plt.subplot(1, 3, 1)
        plt.axis("off")
        plt.title("Image")
        plt.imshow(np.asarray(img_raw))
        plt.subplot(1, 3, 2)
        plt.title("Segmentation")
        plt.axis("off")
        plt.imshow(segmented_image)
        plt.subplot(1, 3, 3)
        plt.title("Overlayed")
        plt.axis("off")
        plt.imshow(overlayed_image[:, :, ::-1])
        plt.show()
        plt.close()
    return


perform_inference()
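# # Note that the inference above runs with whatever weights are in memory after the last
# # trained epoch, while the early-stopping loop checkpointed the best weights to
# # "best_model.pt". A small sketch of my own to evaluate that checkpoint instead
# # (torch.save(model, ...) stored the full pickled model, so torch.load restores it directly):

best_model = torch.load("best_model.pt", map_location=device)
best_model.eval()
perform_inference(model=best_model)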
false
0
226,162
3
226,783
226,162
129010000
import pandas as pd
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.metrics import matthews_corrcoef
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_curve

from google.colab import drive

drive.mount("/content/drive")

# # Load image vectors (VGG16, autoencoder output) and text vectors from topic modeling (LSA-BOW)

# # training data

img_label = np.load(
    "/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy"
)
txt_input = np.load("/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy")
img = img_label[:, 0:-1]
label = img_label[:, -1]
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
print(x.shape)
y.shape

# # validation data

img_label_val = np.load(
    "/content/drive/MyDrive/HMD_project/new/embedding_val_img_norm.npy"
)
txt_input_val = np.load("/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy")
img_val = img_label_val[:, 0:-1]
label_val = img_label_val[:, -1]
# txt_val = txt_input_val - np.min(txt_input_val)
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
x_val.shape

# # Find the optimal threshold from the ROC curve and apply that threshold for classification

# # Logistic regression classifier

clf = LogisticRegression(max_iter=1000, C=0.1, penalty="l2")
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
plt.figure(figsize=(7, 5))
plt.plot(fpr, tpr, label="LSA-LR", linewidth=2)
plt.grid()
plt.title("ROC")
plt.xlabel("False positive rate-------->")
plt.ylabel("True positive rate--------->")
plt.legend(loc="lower right")
optimal_idx = np.argmax(tpr - fpr)  # Youden's J statistic: J = TPR - FPR
optimal_threshold = th[optimal_idx]
print("optimal_threshold", optimal_threshold, "\n")
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y, y_pred))
print(matthews_corrcoef(y, y_pred))
print("\n\nvalidation")
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
# y_pred_val = clf.predict(x_val)
print(classification_report(y_val, y_pred_val))
print(matthews_corrcoef(y_val, y_pred_val))

# # Random Forest

clf = RandomForestClassifier(max_depth=3, random_state=0)
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
plt.figure(figsize=(7, 5))
plt.plot(fpr, tpr, label="LSA-RF", linewidth=2)
plt.grid()
plt.title("ROC")
plt.xlabel("False positive rate-------->")
plt.ylabel("True positive rate--------->")
plt.legend(loc="lower right")
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
print("optimal_threshold", optimal_threshold, "\n")
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y, y_pred))
print(matthews_corrcoef(y, y_pred))
print("\n\nvalidation")
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y_val, y_pred_val))
print(matthews_corrcoef(y_val, y_pred_val))

# # Load text vectors (TFIDF-NMF)

# # train set

txt_input = np.load("/content/drive/MyDrive/HMD_project/new/nmf_tfidf_train_text.npy")
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
print(y.shape)
x.shape
# # validation set

txt_input_val = np.load(
    "/content/drive/MyDrive/HMD_project/new/nmf_tfidf_val_text.npy"
)
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
print(x_val.shape)
y_val.shape

# # Random Forest

clf = RandomForestClassifier(n_estimators=100, max_depth=3, random_state=1)
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
plt.figure(figsize=(7, 5))
plt.plot(fpr, tpr, label="NMF-RF", linewidth=2)
plt.grid()
plt.title("ROC")
plt.xlabel("False positive rate-------->")
plt.ylabel("True positive rate--------->")
plt.legend(loc="lower right")
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
print("optimal_threshold", optimal_threshold, "\n")
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y, y_pred))
print(matthews_corrcoef(y, y_pred))
print("\n\nvalidation")
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y_val, y_pred_val))
print(matthews_corrcoef(y_val, y_pred_val))

# # Logistic regression

clf = LogisticRegression(max_iter=2500, C=50)
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
plt.figure(figsize=(7, 5))
plt.plot(fpr, tpr, label="NMF-LR", linewidth=2)
plt.grid()
plt.title("ROC")
plt.xlabel("False positive rate-------->")
plt.ylabel("True positive rate--------->")
plt.legend(loc="lower right")
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
print("optimal_threshold", optimal_threshold, "\n")
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y, y_pred))
print(matthews_corrcoef(y, y_pred))
print("\n\nvalidation")
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y_val, y_pred_val))
print(matthews_corrcoef(y_val, y_pred_val))
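# # The fit / ROC / threshold / report block above is repeated four times with only the
# # classifier and the plot label changing. A small refactor sketch of my own (the helper
# # name evaluate_with_optimal_threshold is hypothetical, not from the original notebook)
# # that captures the pattern; the threshold is chosen by maximizing Youden's J statistic,
# # J = TPR - FPR, over the ROC curve:


def evaluate_with_optimal_threshold(clf, x, y, x_val, y_val, curve_label):
    clf.fit(x, y)
    scores = clf.predict_proba(x)[:, 1]
    fpr, tpr, th = roc_curve(y, scores)
    plt.figure(figsize=(7, 5))
    plt.plot(fpr, tpr, label=curve_label, linewidth=2)
    plt.grid()
    plt.title("ROC")
    plt.xlabel("False positive rate-------->")
    plt.ylabel("True positive rate--------->")
    plt.legend(loc="lower right")
    optimal_threshold = th[np.argmax(tpr - fpr)]  # Youden's J
    print("optimal_threshold", optimal_threshold, "\n")
    for name, xs, ys in [("train", x, y), ("validation", x_val, y_val)]:
        preds = np.where(clf.predict_proba(xs)[:, 1] > optimal_threshold, 1, 0)
        print(name)
        print(classification_report(ys, preds))
        print(matthews_corrcoef(ys, preds))


# e.g.:
# evaluate_with_optimal_threshold(
#     LogisticRegression(max_iter=2500, C=50), x, y, x_val, y_val, "NMF-LR"
# )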
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/010/129010000.ipynb
null
null
[{"Id": 129010000, "ScriptId": 38349713, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13939396, "CreationDate": "05/10/2023 09:37:28", "VersionNumber": 1.0, "Title": "Classifier_Automatic_threshold", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 242.0, "LinesInsertedFromPrevious": 242.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import pandas as pd import numpy as np from PIL import Image import matplotlib.pyplot as plt from sklearn.metrics import classification_report from sklearn.metrics import matthews_corrcoef from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.metrics import roc_curve from google.colab import drive drive.mount("/content/drive") # # load image (VGG16, autoencoder output) and text vectors topic modeling(LSA-BOW) # # training data img_label = np.load( "/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy" ) txt_input = np.load("/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy") img = img_label[:, 0:-1] label = img_label[:, -1] txt = txt_input img_txt = np.concatenate((img, txt), axis=1) x = img_txt y = label print(x.shape) y.shape # # validation data img_label_val = np.load( "/content/drive/MyDrive/HMD_project/new/embedding_val_img_norm.npy" ) txt_input_val = np.load("/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy") img_val = img_label_val[:, 0:-1] label_val = img_label_val[:, -1] # txt_val=txt_input_val-np.min(txt_input_val) txt_val = txt_input_val img_txt_val = np.concatenate((img_val, txt_val), axis=1) x_val = img_txt_val y_val = label_val x_val.shape # # Find optimal threshold from the ROC curve and apply that threshold for classification # # Logistic regression classifier clf = LogisticRegression(max_iter=1000, C=0.1, penalty="l2") clf.fit(x, y) y_pred_p = clf.predict_proba(x) b = y_pred_p[:, -1] b = b.reshape(y.shape) fpr, tpr, th = roc_curve(y, b) plt.figure(figsize=(7, 5)) plt.plot(fpr, tpr, label="LSA-LR", linewidth=2) plt.grid() plt.title("ROC") plt.xlabel("False positive rate-------->") plt.ylabel("True positive rate--------->") plt.legend(loc="lower right") optimal_idx = np.argmax(tpr - fpr) optimal_threshold = th[optimal_idx] print("optimal_threshold", optimal_threshold, "\n") y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0) print(classification_report(y, y_pred)) print(matthews_corrcoef(y, y_pred)) print("\n\nvalidation") y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0) # y_pred_val=clf.predict(x_val) print(classification_report(y_val, y_pred_val)) print(matthews_corrcoef(y_val, y_pred_val)) # # Random Forest clf = RandomForestClassifier(max_depth=3, random_state=0) clf.fit(x, y) y_pred_p = clf.predict_proba(x) b = y_pred_p[:, -1] b = b.reshape(y.shape) fpr, tpr, th = roc_curve(y, b) plt.figure(figsize=(7, 5)) plt.plot(fpr, tpr, label="LSA-LR", linewidth=2) plt.grid() plt.title("ROC") plt.xlabel("False positive rate-------->") plt.ylabel("True positive rate--------->") plt.legend(loc="lower right") optimal_idx = np.argmax(tpr - fpr) optimal_threshold = th[optimal_idx] print("optimal_threshold", optimal_threshold, "\n") y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0) print(classification_report(y, y_pred)) print(matthews_corrcoef(y, y_pred)) print("\n\nvalidation") y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0) print(classification_report(y_val, y_pred_val)) print(matthews_corrcoef(y_val, y_pred_val)) # # #load text vectors(TFIDF-NMF) # # train set txt_input = np.load("/content/drive/MyDrive/HMD_project/new/nmf_tfidf_train_text.npy") txt = txt_input img_txt = np.concatenate((img, txt), axis=1) x = img_txt y = label print(y.shape) x.shape # # validation set txt_input_val = 
np.load("/content/drive/MyDrive/HMD_project/new/nmf_tfidf_val_text.npy") txt_val = txt_input_val img_txt_val = np.concatenate((img_val, txt_val), axis=1) x_val = img_txt_val y_val = label_val print(x_val.shape) y_val.shape # # Random Forest clf = RandomForestClassifier(n_estimators=100, max_depth=3, random_state=1) clf.fit(x, y) y_pred_p = clf.predict_proba(x) b = y_pred_p[:, -1] b = b.reshape(y.shape) fpr, tpr, th = roc_curve(y, b) plt.figure(figsize=(7, 5)) plt.plot(fpr, tpr, label="LSA-LR", linewidth=2) plt.grid() plt.title("ROC") plt.xlabel("False positive rate-------->") plt.ylabel("True positive rate--------->") plt.legend(loc="lower right") optimal_idx = np.argmax(tpr - fpr) optimal_threshold = th[optimal_idx] print("optimal_threshold", optimal_threshold, "\n") y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0) print(classification_report(y, y_pred)) print(matthews_corrcoef(y, y_pred)) print("\n\nvalidation") y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0) print(classification_report(y_val, y_pred_val)) print(matthews_corrcoef(y_val, y_pred_val)) # # Logistic regression clf = LogisticRegression(max_iter=2500, C=50) clf.fit(x, y) y_pred_p = clf.predict_proba(x) b = y_pred_p[:, -1] b = b.reshape(y.shape) fpr, tpr, th = roc_curve(y, b) plt.figure(figsize=(7, 5)) plt.plot(fpr, tpr, label="LSA-LR", linewidth=2) plt.grid() plt.title("ROC") plt.xlabel("False positive rate-------->") plt.ylabel("True positive rate--------->") plt.legend(loc="lower right") optimal_idx = np.argmax(tpr - fpr) optimal_threshold = th[optimal_idx] print("optimal_threshold", optimal_threshold, "\n") y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0) print(classification_report(y, y_pred)) print(matthews_corrcoef(y, y_pred)) print("\n\nvalidation") y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0) print(classification_report(y_val, y_pred_val)) print(matthews_corrcoef(y_val, y_pred_val))
false
0
2,105
0
2,105
2,105
129010475
<jupyter_start><jupyter_text>Used Cars Price Prediction Kaggle dataset identifier: used-cars-price-prediction <jupyter_code>import pandas as pd df = pd.read_csv('used-cars-price-prediction/train-data.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 6019 entries, 0 to 6018 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 6019 non-null int64 1 Name 6019 non-null object 2 Location 6019 non-null object 3 Year 6019 non-null int64 4 Kilometers_Driven 6019 non-null int64 5 Fuel_Type 6019 non-null object 6 Transmission 6019 non-null object 7 Owner_Type 6019 non-null object 8 Mileage 6017 non-null object 9 Engine 5983 non-null object 10 Power 5983 non-null object 11 Seats 5977 non-null float64 12 New_Price 824 non-null object 13 Price 6019 non-null float64 dtypes: float64(2), int64(3), object(9) memory usage: 658.5+ KB <jupyter_text>Examples: { "Unnamed: 0": 0, "Name": "Maruti Wagon R LXI CNG", "Location": "Mumbai", "Year": 2010, "Kilometers_Driven": 72000, "Fuel_Type": "CNG", "Transmission": "Manual", "Owner_Type": "First", "Mileage": "26.6 km/kg", "Engine": "998 CC", "Power": "58.16 bhp", "Seats": 5, "New_Price": null, "Price": 1.75 } { "Unnamed: 0": 1, "Name": "Hyundai Creta 1.6 CRDi SX Option", "Location": "Pune", "Year": 2015, "Kilometers_Driven": 41000, "Fuel_Type": "Diesel", "Transmission": "Manual", "Owner_Type": "First", "Mileage": "19.67 kmpl", "Engine": "1582 CC", "Power": "126.2 bhp", "Seats": 5, "New_Price": null, "Price": 12.5 } { "Unnamed: 0": 2, "Name": "Honda Jazz V", "Location": "Chennai", "Year": 2011, "Kilometers_Driven": 46000, "Fuel_Type": "Petrol", "Transmission": "Manual", "Owner_Type": "First", "Mileage": "18.2 kmpl", "Engine": "1199 CC", "Power": "88.7 bhp", "Seats": 5, "New_Price": "8.61 Lakh", "Price": 4.5 } { "Unnamed: 0": 3, "Name": "Maruti Ertiga VDI", "Location": "Chennai", "Year": 2012, "Kilometers_Driven": 87000, "Fuel_Type": "Diesel", "Transmission": "Manual", "Owner_Type": "First", "Mileage": "20.77 kmpl", "Engine": "1248 CC", "Power": "88.76 bhp", "Seats": 7, "New_Price": null, "Price": 6.0 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np df = pd.read_csv( "/kaggle/input/used-cars-price-prediction/train-data.csv" ) # df is an object of the DataFrame class, created by reading the file; in this case it's the csv df.head() # read the top 5 records of the dataset df.head(-1) # returns all records except the last one (a negative n drops that many rows from the end) df.head( 2 ) # passing 2 in the parentheses prints the top 2 records, and passing 5 prints the top 5 df.sample(5) # gets us 5 randomly chosen records unique_location = df["Location"].unique() # data type of this unique_location variable unique_location_list = unique_location.tolist() # converting the array to a list len(unique_location_list) # number of unique cities len(df["Location"].unique()) # Exercise: what fuel types are in the dataset? How many seater configurations do the cars have? df["New_Price"].isnull().sum() df2 = df.drop( ["New_Price"], axis=1, inplace=False ) # axis=1 means a column and axis=0 means a row; with inplace=False the main dataframe is not updated, so we need another dataframe to store the updated result df2.head() df.head(1) df.drop( ["New_Price"], axis=1, inplace=True ) # with inplace=True the dataframe is updated in place df.head(1) # Your task is to drop the unnecessary column 'Unnamed: 0' df.describe().T df.info() unique_fuel = df["Fuel_Type"].unique() unique_fuel_list = unique_fuel.tolist() unique_fuel_list df["Seats"].isnull().sum()
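# A minimal sketch answering the two exercises above, using the df already
# loaded in this notebook (the name df_clean is illustrative, not part of the
# original code):
df_clean = df.drop(["Unnamed: 0"], axis=1)  # inplace=False by default, df is untouched
print(df_clean["Fuel_Type"].unique().tolist())  # e.g. CNG, Diesel, Petrol, ...
print(df_clean["Seats"].dropna().nunique())  # number of distinct seater configurations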
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/010/129010475.ipynb
used-cars-price-prediction
avikasliwal
[{"Id": 129010475, "ScriptId": 38342612, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14950540, "CreationDate": "05/10/2023 09:41:43", "VersionNumber": 1.0, "Title": "notebook649cad9d3f", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 66.0, "LinesInsertedFromPrevious": 66.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184690050, "KernelVersionId": 129010475, "SourceDatasetVersionId": 518431}]
[{"Id": 518431, "DatasetId": 245550, "DatasourceVersionId": 534662, "CreatorUserId": 2716677, "LicenseName": "Other (specified in description)", "CreationDate": "06/25/2019 10:26:52", "VersionNumber": 2.0, "Title": "Used Cars Price Prediction", "Slug": "used-cars-price-prediction", "Subtitle": "Predict the price of an unknown car. Build your own Algo for cars 24 !!", "Description": NaN, "VersionNotes": "Replaced xlsx files by csv files", "TotalCompressedBytes": 791875.0, "TotalUncompressedBytes": 791875.0}]
[{"Id": 245550, "CreatorUserId": 2716677, "OwnerUserId": 2716677.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 518431.0, "CurrentDatasourceVersionId": 534662.0, "ForumId": 256748, "Type": 2, "CreationDate": "06/25/2019 10:11:54", "LastActivityDate": "06/25/2019", "TotalViews": 140175, "TotalDownloads": 22061, "TotalVotes": 222, "TotalKernels": 107}]
[{"Id": 2716677, "UserName": "avikasliwal", "DisplayName": "Avi Kasliwal", "RegisterDate": "01/18/2019", "PerformanceTier": 0}]
[{"used-cars-price-prediction/train-data.csv": {"column_names": "[\"Unnamed: 0\", \"Name\", \"Location\", \"Year\", \"Kilometers_Driven\", \"Fuel_Type\", \"Transmission\", \"Owner_Type\", \"Mileage\", \"Engine\", \"Power\", \"Seats\", \"New_Price\", \"Price\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"Name\": \"object\", \"Location\": \"object\", \"Year\": \"int64\", \"Kilometers_Driven\": \"int64\", \"Fuel_Type\": \"object\", \"Transmission\": \"object\", \"Owner_Type\": \"object\", \"Mileage\": \"object\", \"Engine\": \"object\", \"Power\": \"object\", \"Seats\": \"float64\", \"New_Price\": \"object\", \"Price\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6019 entries, 0 to 6018\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 6019 non-null int64 \n 1 Name 6019 non-null object \n 2 Location 6019 non-null object \n 3 Year 6019 non-null int64 \n 4 Kilometers_Driven 6019 non-null int64 \n 5 Fuel_Type 6019 non-null object \n 6 Transmission 6019 non-null object \n 7 Owner_Type 6019 non-null object \n 8 Mileage 6017 non-null object \n 9 Engine 5983 non-null object \n 10 Power 5983 non-null object \n 11 Seats 5977 non-null float64\n 12 New_Price 824 non-null object \n 13 Price 6019 non-null float64\ndtypes: float64(2), int64(3), object(9)\nmemory usage: 658.5+ KB\n", "summary": "{\"Unnamed: 0\": {\"count\": 6019.0, \"mean\": 3009.0, \"std\": 1737.6799666988932, \"min\": 0.0, \"25%\": 1504.5, \"50%\": 3009.0, \"75%\": 4513.5, \"max\": 6018.0}, \"Year\": {\"count\": 6019.0, \"mean\": 2013.3581990363848, \"std\": 3.2697421160913964, \"min\": 1998.0, \"25%\": 2011.0, \"50%\": 2014.0, \"75%\": 2016.0, \"max\": 2019.0}, \"Kilometers_Driven\": {\"count\": 6019.0, \"mean\": 58738.38029573019, \"std\": 91268.84320624862, \"min\": 171.0, \"25%\": 34000.0, \"50%\": 53000.0, \"75%\": 73000.0, \"max\": 6500000.0}, \"Seats\": {\"count\": 5977.0, \"mean\": 5.278735151413753, \"std\": 0.8088395547482927, \"min\": 0.0, \"25%\": 5.0, \"50%\": 5.0, \"75%\": 5.0, \"max\": 10.0}, \"Price\": {\"count\": 6019.0, \"mean\": 9.47946835022429, \"std\": 11.1879171124555, \"min\": 0.44, \"25%\": 3.5, \"50%\": 5.64, \"75%\": 9.95, \"max\": 160.0}}", "examples": "{\"Unnamed: 0\":{\"0\":0,\"1\":1,\"2\":2,\"3\":3},\"Name\":{\"0\":\"Maruti Wagon R LXI CNG\",\"1\":\"Hyundai Creta 1.6 CRDi SX Option\",\"2\":\"Honda Jazz V\",\"3\":\"Maruti Ertiga VDI\"},\"Location\":{\"0\":\"Mumbai\",\"1\":\"Pune\",\"2\":\"Chennai\",\"3\":\"Chennai\"},\"Year\":{\"0\":2010,\"1\":2015,\"2\":2011,\"3\":2012},\"Kilometers_Driven\":{\"0\":72000,\"1\":41000,\"2\":46000,\"3\":87000},\"Fuel_Type\":{\"0\":\"CNG\",\"1\":\"Diesel\",\"2\":\"Petrol\",\"3\":\"Diesel\"},\"Transmission\":{\"0\":\"Manual\",\"1\":\"Manual\",\"2\":\"Manual\",\"3\":\"Manual\"},\"Owner_Type\":{\"0\":\"First\",\"1\":\"First\",\"2\":\"First\",\"3\":\"First\"},\"Mileage\":{\"0\":\"26.6 km\\/kg\",\"1\":\"19.67 kmpl\",\"2\":\"18.2 kmpl\",\"3\":\"20.77 kmpl\"},\"Engine\":{\"0\":\"998 CC\",\"1\":\"1582 CC\",\"2\":\"1199 CC\",\"3\":\"1248 CC\"},\"Power\":{\"0\":\"58.16 bhp\",\"1\":\"126.2 bhp\",\"2\":\"88.7 bhp\",\"3\":\"88.76 bhp\"},\"Seats\":{\"0\":5.0,\"1\":5.0,\"2\":5.0,\"3\":7.0},\"New_Price\":{\"0\":null,\"1\":null,\"2\":\"8.61 Lakh\",\"3\":null},\"Price\":{\"0\":1.75,\"1\":12.5,\"2\":4.5,\"3\":6.0}}"}}]
true
1
<start_data_description><data_path>used-cars-price-prediction/train-data.csv: <column_names> ['Unnamed: 0', 'Name', 'Location', 'Year', 'Kilometers_Driven', 'Fuel_Type', 'Transmission', 'Owner_Type', 'Mileage', 'Engine', 'Power', 'Seats', 'New_Price', 'Price'] <column_types> {'Unnamed: 0': 'int64', 'Name': 'object', 'Location': 'object', 'Year': 'int64', 'Kilometers_Driven': 'int64', 'Fuel_Type': 'object', 'Transmission': 'object', 'Owner_Type': 'object', 'Mileage': 'object', 'Engine': 'object', 'Power': 'object', 'Seats': 'float64', 'New_Price': 'object', 'Price': 'float64'} <dataframe_Summary> {'Unnamed: 0': {'count': 6019.0, 'mean': 3009.0, 'std': 1737.6799666988932, 'min': 0.0, '25%': 1504.5, '50%': 3009.0, '75%': 4513.5, 'max': 6018.0}, 'Year': {'count': 6019.0, 'mean': 2013.3581990363848, 'std': 3.2697421160913964, 'min': 1998.0, '25%': 2011.0, '50%': 2014.0, '75%': 2016.0, 'max': 2019.0}, 'Kilometers_Driven': {'count': 6019.0, 'mean': 58738.38029573019, 'std': 91268.84320624862, 'min': 171.0, '25%': 34000.0, '50%': 53000.0, '75%': 73000.0, 'max': 6500000.0}, 'Seats': {'count': 5977.0, 'mean': 5.278735151413753, 'std': 0.8088395547482927, 'min': 0.0, '25%': 5.0, '50%': 5.0, '75%': 5.0, 'max': 10.0}, 'Price': {'count': 6019.0, 'mean': 9.47946835022429, 'std': 11.1879171124555, 'min': 0.44, '25%': 3.5, '50%': 5.64, '75%': 9.95, 'max': 160.0}} <dataframe_info> RangeIndex: 6019 entries, 0 to 6018 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 6019 non-null int64 1 Name 6019 non-null object 2 Location 6019 non-null object 3 Year 6019 non-null int64 4 Kilometers_Driven 6019 non-null int64 5 Fuel_Type 6019 non-null object 6 Transmission 6019 non-null object 7 Owner_Type 6019 non-null object 8 Mileage 6017 non-null object 9 Engine 5983 non-null object 10 Power 5983 non-null object 11 Seats 5977 non-null float64 12 New_Price 824 non-null object 13 Price 6019 non-null float64 dtypes: float64(2), int64(3), object(9) memory usage: 658.5+ KB <some_examples> {'Unnamed: 0': {'0': 0, '1': 1, '2': 2, '3': 3}, 'Name': {'0': 'Maruti Wagon R LXI CNG', '1': 'Hyundai Creta 1.6 CRDi SX Option', '2': 'Honda Jazz V', '3': 'Maruti Ertiga VDI'}, 'Location': {'0': 'Mumbai', '1': 'Pune', '2': 'Chennai', '3': 'Chennai'}, 'Year': {'0': 2010, '1': 2015, '2': 2011, '3': 2012}, 'Kilometers_Driven': {'0': 72000, '1': 41000, '2': 46000, '3': 87000}, 'Fuel_Type': {'0': 'CNG', '1': 'Diesel', '2': 'Petrol', '3': 'Diesel'}, 'Transmission': {'0': 'Manual', '1': 'Manual', '2': 'Manual', '3': 'Manual'}, 'Owner_Type': {'0': 'First', '1': 'First', '2': 'First', '3': 'First'}, 'Mileage': {'0': '26.6 km/kg', '1': '19.67 kmpl', '2': '18.2 kmpl', '3': '20.77 kmpl'}, 'Engine': {'0': '998 CC', '1': '1582 CC', '2': '1199 CC', '3': '1248 CC'}, 'Power': {'0': '58.16 bhp', '1': '126.2 bhp', '2': '88.7 bhp', '3': '88.76 bhp'}, 'Seats': {'0': 5.0, '1': 5.0, '2': 5.0, '3': 7.0}, 'New_Price': {'0': None, '1': None, '2': '8.61 Lakh', '3': None}, 'Price': {'0': 1.75, '1': 12.5, '2': 4.5, '3': 6.0}} <end_description>
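# The description above shows Mileage, Engine, and Power stored as strings with
# units ("26.6 km/kg", "998 CC", "58.16 bhp"). A hedged sketch of one common
# cleanup step before modeling (the regex approach and the name df_units are
# illustrations, not the notebook's own preprocessing):
import pandas as pd

df_units = pd.read_csv("used-cars-price-prediction/train-data.csv")
for col in ["Mileage", "Engine", "Power"]:
    # keep the leading numeric part; unparseable or missing cells become NaN
    df_units[col] = pd.to_numeric(
        df_units[col].str.extract(r"([\d.]+)")[0], errors="coerce"
    )
print(df_units[["Mileage", "Engine", "Power"]].dtypes)  # all float64 after parsing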
637
0
1,645
637
129067066
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import os from datetime import datetime, timedelta import pyedflib import numpy as np import pandas as pd from scipy import signal from scipy.signal import butter, lfilter import matplotlib.pyplot as plt from datetime import datetime, timedelta import os from math import floor import copy def create_dir(directory_path): """Method to create a directory. Returns True if the directory already exists.""" if os.path.exists(directory_path): return True else: os.makedirs(directory_path) return False def round_down(num, divisor): """Method to round down a number""" return num - (num % divisor) def get_time(datetime_string): """Method to convert a string to a datetime object.""" time = 0 try: time = datetime.strptime(datetime_string, "%H:%M:%S") except ValueError: datetime_string = " " + datetime_string if " 24" in datetime_string: datetime_string = datetime_string.replace(" 24", "23") time = datetime.strptime(datetime_string, "%H:%M:%S") time += timedelta(hours=1) else: datetime_string = datetime_string.replace(" 25", "23") time = datetime.strptime(datetime_string, "%H:%M:%S") time += timedelta(hours=2) return time def extract_interval_data( patient, data_dir, extract_ictal_samples=True, extract_preictal_samples=True, ictal_interval_padding_duration=32, seizure_occurance_period=30, seizure_prediction_horizon=5, ): """Method to extract interval patient data.""" patient_summary = open( os.path.join(data_dir, "chb%02d" % patient, "chb%02d-summary.txt" % patient), "r", ) interictal_intervals = [] interictal_files = [] ictal_intervals = [] ictal_files = [] preictal_intervals = [] preictal_files = [] line = patient_summary.readline() start_time = datetime.min old_time = datetime.min line_number = 0 while line: line_data = line.split(":") if line_data[0] == "File Name": file_name = line_data[1].strip() s = get_time(patient_summary.readline().split(": ")[1].strip()) if line_number == 0: start_time = s while s < old_time: s += timedelta(hours=24) old_time = s end_time_file = get_time(patient_summary.readline().split(": ")[1].strip()) while end_time_file < old_time: end_time_file = end_time_file + timedelta(hours=24) old_time = end_time_file n_seizures = int(patient_summary.readline().split(": ")[1]) if n_seizures == 0: # Extract interictal interval data interictal_intervals.append([s, end_time_file]) interictal_files.append([s, end_time_file, file_name]) else: # Extract ictal and preictal interval data for i in range(0, n_seizures): seconds_start = int( patient_summary.readline().split(": ")[1].split(" ")[0] ) seconds_end = int( patient_summary.readline().split(": ")[1].split(" ")[0] ) if extract_ictal_samples: # Extract ictal interval data interval_start = s + timedelta(seconds=seconds_start) if ( len(ictal_intervals) == 0 or interval_start > datetime.min ) and interval_start - start_time > timedelta(minutes=20): interval_end = s + 
timedelta(seconds=seconds_end) ictal_intervals.append( [ interval_start - timedelta( seconds=ictal_interval_padding_duration ), interval_end + timedelta( seconds=ictal_interval_padding_duration ), ] ) ictal_files.append([s, end_time_file, file_name]) if extract_preictal_samples: # Extract preictal interval data interval_start = ( s + timedelta(seconds=seconds_start) - timedelta( minutes=seizure_prediction_horizon + seizure_occurance_period ) ) if ( len(preictal_intervals) == 0 or interval_start > datetime.min ) and interval_start - start_time > timedelta(minutes=20): interval_end = interval_start + timedelta( minutes=seizure_occurance_period ) preictal_intervals.append([interval_start, interval_end]) preictal_files.append([s, end_time_file, file_name]) line = patient_summary.readline() line_number += 1 patient_summary.close() return ( interictal_intervals, interictal_files, ictal_intervals, ictal_files, preictal_intervals, preictal_files, ) def load_patient_data(patient, file, data_dir): """Method to load patient data.""" f = pyedflib.EdfReader("%schb%02d/%s" % (data_dir, patient, file)) n = f.signals_in_file signals = np.zeros((n, f.getNSamples()[0])) for i in np.arange(n): signals[i, :] = f.readSignal(i) return signals def extract_batches_from_interval( patient, data_dir, file, file_start, file_end, interval_start, interval_end, segment_index, n_channels, ): """Method to extract batch samples from specified intervals.""" start = 0 if file_start < interval_start: start = (interval_start - file_start).seconds * sample_rate if file_end <= interval_end: end = -1 data = load_patient_data(patient, file[2], data_dir)[:, start:] else: end = ((interval_end - file_start).seconds * sample_rate) + 1 data = load_patient_data(patient, file[2], data_dir)[:, start : end + 1] if (data.shape[0] >= n_channels) and (data.shape[1] >= sample_rate * window_size): truncated_len = round_down(data.shape[1], sample_rate * window_size) return ( np.array( np.split( data[0:n_channels, 0:truncated_len], truncated_len / (sample_rate * window_size), axis=1, ) ).swapaxes(0, 1), segment_index, ) else: return np.array([]), segment_index def extract_batches( patient, file, data_dir, segment_index, intervals, sample_rate, window_size, n_channels, ): """Method to extract batches.""" file_start = file[0] file_end = file[1] interval_start = intervals[segment_index][0] interval_end = intervals[segment_index][1] while file_start > interval_end and segment_index < len(intervals) - 1: segment_index += 1 interval_start = intervals[segment_index][0] interval_end = intervals[segment_index][1] if (interval_end - interval_start).seconds >= window_size: return extract_batches_from_interval( patient, data_dir, file, file_start, file_end, interval_start, interval_end, segment_index, n_channels, ) else: return np.array([]), segment_index def gen_synthetic_batches( patient, file, data_dir, segment_index, intervals, sample_rate, window_size, stride_len, n_channels, ): """Method to generate synthetic batches.""" file_start = file[0] file_end = file[1] interval_start = intervals[segment_index][0] interval_end = intervals[segment_index][1] while file_start > interval_end and segment_index < len(intervals) - 1: segment_index += 1 interval_start = intervals[segment_index][0] interval_end = intervals[segment_index][1] if (interval_end - interval_start).seconds > window_size: synthetic_batches = np.array([]).reshape( n_channels, 0, sample_rate * window_size ) synthetic_interval_start = interval_start + timedelta(seconds=stride_len) 
synthetic_interval_end = synthetic_interval_start + timedelta( seconds=window_size ) while synthetic_interval_end < interval_end: extracted_batches = extract_batches_from_interval( patient, data_dir, file, file_start, file_end, synthetic_interval_start, synthetic_interval_end, segment_index, n_channels, )[0] if extracted_batches.size > 0: synthetic_batches = np.concatenate( (synthetic_batches, extracted_batches), axis=1 ) synthetic_interval_start += timedelta(seconds=stride_len) synthetic_interval_end += timedelta(seconds=stride_len) return synthetic_batches, segment_index else: return np.array([]), segment_index os.path.exists("/kaggle/working/processed_data") n_channels = 22 sample_rate = 256 # Sample rate (Hz) window_size = 64 # Window size (seconds) # Stride length (seconds) used to generate synthetic preictal and ictal samples stride_len = 32 # Data directory path # data_dir = "/scratch/jcu/cl/CHBMIT/chb-mit-scalp-eeg-database-1.0.0/" data_dir = "/kaggle/input/chb01-21/chbmit/" processed_data_dir = ( "/kaggle/working/processed_data/" # Processed data output directory path ) patients = np.arange(1, 24) # Remove patients 4, 6, 7, 12, and 20, as their records contain anomalous data patients = np.delete(patients, [3, 5, 6, 11, 19]) patients = [1] # TEMP ictal_interval_padding_duration = 32 # ------------------------------------------------------------------------------ seizure_occurance_period = 30 # Seizure occurrence period (minutes) seizure_prediction_horizon = 5 # Seizure prediction horizon (minutes) # ------------------------------------------------------------------------------ extract_ictal_samples = False extract_preictal_samples = True generate_synthetic_samples = False # ------------------------------------------------------------------------------ if __name__ == "__main__": for patient in patients: try: print("Patient: %02d" % patient) create_dir(processed_data_dir) ( interictal_intervals, interictal_files, ictal_intervals, ictal_files, preictal_intervals, preictal_files, ) = extract_interval_data( patient, data_dir, extract_ictal_samples, extract_preictal_samples, ictal_interval_padding_duration, seizure_occurance_period, seizure_prediction_horizon, ) if patient == 19: # Disregard the first seizure of patient 19, as it is not considered in this analysis preictal_intervals.pop(0) interictal_segment_index = 0 interictal_data = np.array([]).reshape( n_channels, 0, sample_rate * window_size ) if extract_ictal_samples: ictal_segment_index = 0 synthetic_ictal_segment_index = 0 ictal_data = copy.deepcopy(interictal_data) synthetic_ictal_data = copy.deepcopy(interictal_data) if extract_preictal_samples: preictal_segment_index = 0 synthetic_preictal_segment_index = 0 preictal_data = copy.deepcopy(interictal_data) synthetic_preictal_data = copy.deepcopy(interictal_data) # Extract interictal samples (batches) for file in interictal_files: data, interictal_segment_index = extract_batches( patient, file, data_dir, interictal_segment_index, interictal_intervals, sample_rate, window_size, n_channels, ) if data.size > 0: interictal_data = np.concatenate((interictal_data, data), axis=1) print("Interictal: ", interictal_data.shape) np.save( os.path.join( processed_data_dir, "CHBMIT_patient_%02d_interictal.npy" % patient, ), interictal_data, ) del interictal_data if extract_ictal_samples: # Extract ictal samples (batches) for file in ictal_files: data, ictal_segment_index = extract_batches( patient, file, data_dir, ictal_segment_index,
ictal_intervals, sample_rate, window_size, n_channels, ) if data.size > 0: ictal_data = np.concatenate((ictal_data, data), axis=1) print("Ictal: ", ictal_data.shape) np.save( os.path.join( processed_data_dir, "CHBMIT_patient_%02d_ictal.npy" % patient, ), ictal_data, ) del ictal_data if generate_synthetic_samples: # Generate synthetic ictal samples (batches) for file in ictal_files: data, synthetic_ictal_segment_index = gen_synthetic_batches( patient, file, data_dir, synthetic_ictal_segment_index, ictal_intervals, sample_rate, window_size, stride_len, n_channels, ) if data.size > 0: synthetic_ictal_data = np.concatenate( (synthetic_ictal_data, data), axis=1 ) print("Synthetic Ictal: ", synthetic_ictal_data.shape) np.save( os.path.join( processed_data_dir, "CHBMIT_patient_%02d_synthetic_ictal.npy" % patient, ), synthetic_ictal_data, ) del synthetic_ictal_data if extract_preictal_samples: # Extract preictal samples (batches) for file in preictal_files: data, preictal_segment_index = extract_batches( patient, file, data_dir, preictal_segment_index, preictal_intervals, sample_rate, window_size, n_channels, ) if data.size > 0: preictal_data = np.concatenate((preictal_data, data), axis=1) print("Preictal: ", preictal_data.shape) np.save( os.path.join( processed_data_dir, "CHBMIT_patient_%02d_preictal.npy" % patient, ), preictal_data, ) del preictal_data if generate_synthetic_samples: # Generate synthetic preictal samples (batches) for file in preictal_files: data, synthetic_preictal_segment_index = gen_synthetic_batches( patient, file, data_dir, synthetic_preictal_segment_index, preictal_intervals, sample_rate, window_size, stride_len, n_channels, ) if data.size > 0: synthetic_preictal_data = np.concatenate( (synthetic_preictal_data, data), axis=1 ) print("Synthetic Preictal: ", synthetic_preictal_data.shape) np.save( os.path.join( processed_data_dir, "CHBMIT_patient_%02d_synthetic_preictal.npy" % patient, ), synthetic_preictal_data, ) del synthetic_preictal_data except Exception as e: print("Patient: %02d Failed" % patient) print(e)
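# A quick hedged sanity check of the helpers above (illustrative values only):
# the CHB-MIT summary files occasionally record hours 24/25, which get_time
# folds back into a valid datetime by substituting hour 23 and adding the
# difference, and round_down truncates a sample count to a whole-window multiple.
print(get_time("12:30:05").time())  # ordinary timestamp -> 12:30:05
print(get_time("24:15:00").time())  # hour 24 -> 00:15:00 on the next day
print(round_down(70000, 256 * 64))  # 70000 -> 65536 (multiple of 256 * 64 = 16384)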
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/067/129067066.ipynb
null
null
[{"Id": 129067066, "ScriptId": 38216406, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14975555, "CreationDate": "05/10/2023 17:54:44", "VersionNumber": 2.0, "Title": "parallell cnn", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 530.0, "LinesInsertedFromPrevious": 512.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 18.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,303
0
4,303
4,303
129067634
<jupyter_start><jupyter_text>pretrainedmodels ### Pretrained Models in Pytorch Github repo: https://github.com/Cadene/pretrained-models.pytorch Version: 0.7.4 Original Author: Cadene License: https://github.com/Cadene/pretrained-models.pytorch/blob/master/LICENSE.txt Kaggle dataset identifier: pretrainedmodels <jupyter_script># ## summary # **This notebook is written for new Kagglers** # **I can't guarantee that every comment is interpreted correctly, so please point out any mistakes** # Original code: https://www.kaggle.com/code/tanakar/2-5d-segmentaion-baseline-training # * 2.5d segmentation # * segmentation_models_pytorch # * Unet # * use only 6 slices in the middle # * slide inference # sklearn.metrics provides evaluation metrics for classification problems in the scikit-learn library. This module offers functions to compute the accuracy, precision, recall, F1 score, and other metrics of a classification model, in order to evaluate its performance. These metrics help us understand the classification ability of the model, so that we can tune its parameters and algorithms and improve its prediction accuracy. # Pickle is a module in Python for serializing and deserializing Python objects. With Pickle, we can convert a Python object to a byte stream and then save it to a file or transfer it over the network. Conversely, we can also deserialize byte streams back into Python objects. Pickle is a persistence mechanism in Python that makes it easy to save and restore data. # Autocast and GradScaler are both tools in PyTorch for accelerating model training and reducing memory footprint. # Autocast is an automatic mixed precision tool that runs eligible operations in half precision during the model's forward pass (and the matching backward pass). This helps reduce GPU memory usage and speeds up model training. When using Autocast, the forward computation needs to be wrapped in the torch.cuda.amp.autocast() context manager. GradScaler is a gradient scaling tool for scaling gradient values during training. This helps avoid gradient underflow in half precision and improves model stability and training results. When using GradScaler, the loss is multiplied by a scaling factor before back-propagation, and the gradients are unscaled again before the optimizer step. The scaling factor is adjusted dynamically according to the gradient values to keep the gradients stable.
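# A minimal sketch of the autocast/GradScaler pattern described above -- generic
# PyTorch AMP usage, not this notebook's exact training loop (the function name
# amp_step is illustrative):
import torch
from torch.cuda.amp import autocast, GradScaler


def amp_step(model, images, targets, criterion, optimizer, scaler):
    optimizer.zero_grad()
    with autocast():  # run the forward pass in mixed precision
        loss = criterion(model(images), targets)
    scaler.scale(loss).backward()  # scale the loss before back-propagation
    scaler.step(optimizer)  # unscales the gradients, then takes the optimizer step
    scaler.update()  # adjust the scale factor dynamically
    return loss.item()


# A GradScaler is created once per training run, e.g. scaler = GradScaler().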
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score, log_loss import pickle from torch.utils.data import DataLoader from torch.cuda.amp import autocast, GradScaler import warnings import sys import pandas as pd import os import gc import sys import math import time import random import shutil from pathlib import Path from contextlib import contextmanager from collections import defaultdict, Counter import cv2 import scipy as sp import numpy as np import pandas as pd import matplotlib.pyplot as plt from tqdm.auto import tqdm from functools import partial import argparse import importlib import torch import torch.nn as nn from torch.optim import Adam, SGD, AdamW import datetime # sys.path.append('/kaggle/input/pretrainedmodels/pretrainedmodels-0.7.4') # sys.path.append('/kaggle/input/efficientnet-pytorch/EfficientNet-PyTorch-master') # sys.path.append('/kaggle/input/timm-pytorch-image-models/pytorch-image-models-master') # sys.path.append('/kaggle/input/segmentation-models-pytorch/segmentation_models.pytorch-master') import segmentation_models_pytorch as smp # segmentation_models_pytorch is a PyTorch-based deep learning library for image segmentation tasks. # It supports many popular segmentation models, such as UNet, LinkNet, and FPN, and provides many pre-trained encoders to help users quickly build and train their own models. import numpy as np from torch.utils.data import DataLoader, Dataset import cv2 import torch import os import albumentations as A from albumentations.pytorch import ToTensorV2 from albumentations import ImageOnlyTransform # ## config class CFG: # ============== comp exp name ============= comp_name = "vesuvius" # comp_dir_path = './' comp_dir_path = "/kaggle/input/" comp_folder_name = "vesuvius-challenge-ink-detection" # comp_dataset_path = f'{comp_dir_path}datasets/{comp_folder_name}/' comp_dataset_path = f"{comp_dir_path}{comp_folder_name}/" exp_name = "vesuvius_2d_slide_exp002" # ============== pred target ============= target_size = 1 # ============== model cfg ============= # Segmentation model and pre-trained encoder settings model_name = "Unet" # backbone = 'efficientnet-b0' backbone = "resnext101_32x4d" # backbone = 'resnext50_32x4d' # backbone = 'resnet50' # The surface volume has 65 slices ("channels"): # the 3D scan is split into multiple 2D images, # and ink that soaked deeper into the papyrus shows up in the deeper slices, so using more slices carries more complete information in_chans = 3 # 64 # ============== training cfg ============= # Input size of the image and mask crops size = 224 tile_size = 224 # The number of small images cut out and the position of each small image can be controlled by setting CFG.stride. stride = tile_size // 2 train_batch_size = 16 # 32 valid_batch_size = train_batch_size * 2 use_amp = True # Learning rate scheduler scheduler = "GradualWarmupSchedulerV2" # scheduler = 'CosineAnnealingLR' epochs = 10 # 30 """ warmup_factor is a scaling factor that controls how fast the learning rate increases. It is applied at the beginning of training, while the model weights are not yet accurate. lr is the learning rate; in this expression the learning rate is divided by 10 through warmup_factor.
This is because at the beginning of training the learning rate should be relatively small so that the model converges well, and as training proceeds the learning rate is gradually increased so that the weight space can be explored better. Thus, with this formula, a relatively small learning rate is used at the start of training and is gradually raised to the appropriate size. """ # adamW warmup_factor = 10 # lr = 1e-4 / warmup_factor lr = 1e-4 / warmup_factor # ============== fold ============= # k-fold cross-validation: this method has the advantage of making better use of the data while reducing errors caused by the chance of the data split. # The disadvantage is that k rounds of training and validation are required, so the computational cost is high. # Each of the three fragment folders (1-3) can serve as the validation set; valid_id selects which one valid_id = 1 """ In this example, metric_direction is set to 'maximize', indicating that the metric we want to optimize should be as large as possible. """ # objective_cv = 'binary' # 'binary', 'multiclass', 'regression' metric_direction = "maximize" # maximize, 'minimize' # metrics = 'dice_coef' # ============== fixed ============= pretrained = True inf_weight = "best" # 'best' min_lr = 1e-6 weight_decay = 1e-6 max_grad_norm = 1000 print_freq = 50 num_workers = 10 # Fixed seeds make the code reproducible. # Randomness in deep learning comes from factors such as weight initialization and random sampling; this randomness can make the model more expressive and generalizable. # However, since deep learning models are usually very large and their training requires a lot of time and computational resources, it is useful to fix random seeds in order to make experiments reproducible. # Using a fixed random seed ensures that the same sequence of random numbers is generated by the same random number generator each time the experiment is run. # This allows researchers to get the same results when running experiments on different machines, which facilitates comparison and validation of results. # In addition, a fixed random seed makes the model's behavior during training more predictable, helping researchers better understand the model's performance and behavior.
seed = 42 # ============== set dataset path ============= print("set dataset path") outputs_path = f"/kaggle/working/outputs/{comp_name}/{exp_name}/" submission_dir = outputs_path + "submissions/" submission_path = submission_dir + f"submission_{exp_name}.csv" model_dir = outputs_path + f"{comp_name}-models/" figures_dir = outputs_path + "figures/" log_dir = outputs_path + "logs/" log_path = log_dir + f"{exp_name}.txt" # ============== augmentation ============= # Data augmentation train_aug_list = [ # A.RandomResizedCrop( # size, size, scale=(0.85, 1.0)), A.Resize(size, size), A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5), A.RandomBrightnessContrast(p=0.75), A.ShiftScaleRotate(p=0.75), A.OneOf( [ A.GaussNoise(var_limit=[10, 50]), A.GaussianBlur(), A.MotionBlur(), ], p=0.4, ), A.GridDistortion(num_steps=5, distort_limit=0.3, p=0.5), A.CoarseDropout( max_holes=1, max_width=int(size * 0.3), max_height=int(size * 0.3), mask_fill_value=0, p=0.5, ), # A.Cutout(max_h_size=int(size * 0.6), # max_w_size=int(size * 0.6), num_holes=1, p=1.0), A.Normalize(mean=[0] * in_chans, std=[1] * in_chans), ToTensorV2(transpose_mask=True), ] valid_aug_list = [ A.Resize(size, size), A.Normalize(mean=[0] * in_chans, std=[1] * in_chans), ToTensorV2(transpose_mask=True), ] # ## helper # Computes and stores the average and current value. # Call the update method with each new value; read the avg attribute to get the running average. class AverageMeter(object): """Computes and stores the average and current value""" # Initialize the attribute values of the instance def __init__(self): self.reset() # Reset the attribute values of the instance to 0 def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 # Update the attribute values of the instance with the given value def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count # # log # Accepts one argument, log_file: the path of the log file. # The function initializes a logger object, which is used to record logging information while the program is running. This is implemented as follows: def init_logger(log_file): from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler logger = getLogger(__name__) logger.setLevel(INFO) handler1 = StreamHandler() handler1.setFormatter(Formatter("%(message)s")) handler2 = FileHandler(filename=log_file) handler2.setFormatter(Formatter("%(message)s")) logger.addHandler(handler1) logger.addHandler(handler2) return logger def set_seed(seed=None, cudnn_deterministic=True): if seed is None: seed = 42 os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = cudnn_deterministic torch.backends.cudnn.benchmark = False # This function creates the required directories with os.makedirs(); the exist_ok=True argument means no error is raised if a directory already exists. # Its purpose is to create these directories when needed for use in subsequent code.
def make_dirs(cfg): for dir in [cfg.model_dir, cfg.figures_dir, cfg.submission_dir, cfg.log_dir]: os.makedirs(dir, exist_ok=True) # # Initialization functions def cfg_init(cfg, mode="train"): set_seed(cfg.seed) # set_env_name() # set_dataset_path(cfg) if mode == "train": make_dirs(cfg) cfg_init(CFG) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") Logger = init_logger(log_file=CFG.log_path) Logger.info("\n\n-------- exp_info -----------------") # Logger.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')) # ## image, mask # The purpose of this code is to build a window of length CFG.in_chans whose center index is mid. # Specifically, mid is set to 65 // 2; subtracting half of CFG.in_chans from mid gives the start index of the window, and adding half of CFG.in_chans to mid gives the end index. # Finally, the range function generates the sequence of integers idxs from start to end-1. # # **Set the number of images to be read from 0-65, and adjust the number of input "channels" by changing in_chans** def read_image_mask(fragment_id): images = [] # idxs = range(65) mid = 65 // 2 start = mid - CFG.in_chans // 2 end = mid + CFG.in_chans // 2 idxs = range(start, end) for i in tqdm(idxs): image = cv2.imread( CFG.comp_dataset_path + f"train/{fragment_id}/surface_volume/{i:02}.tif", 0 ) pad0 = CFG.tile_size - image.shape[0] % CFG.tile_size pad1 = CFG.tile_size - image.shape[1] % CFG.tile_size image = np.pad(image, [(0, pad0), (0, pad1)], constant_values=0) images.append(image) images = np.stack(images, axis=2) mask = cv2.imread(CFG.comp_dataset_path + f"train/{fragment_id}/inklabels.png", 0) mask = np.pad(mask, [(0, pad0), (0, pad1)], constant_values=0) mask = mask.astype("float32") mask /= 255.0 return images, mask def get_train_valid_dataset(): train_images = [] train_masks = [] valid_images = [] valid_masks = [] valid_xyxys = [] for fragment_id in range(1, 4): image, mask = read_image_mask(fragment_id) """ This code generates a set of image crop coordinates that are used to split a large image into smaller pieces for easier processing. Here image is the original image and CFG is a set of constant parameters. Specifically, the parameters of the range function set the step size and range of the crop coordinates. image.shape[0] and image.shape[1] denote the height and width of the image, respectively. Subtracting CFG.tile_size ensures that the segmented chunks are all squares with CFG.tile_size as the side length. The final x1_list and y1_list are the coordinates of all the generated crops, which are used for subsequent processing. """ x1_list = list(range(0, image.shape[1] - CFG.tile_size + 1, CFG.stride)) y1_list = list(range(0, image.shape[0] - CFG.tile_size + 1, CFG.stride)) # y1_list and x1_list are the lists of starting coordinates in the height and width directions of the large image, respectively, and CFG.tile_size is the size of each small image block. # If fragment_id is equal to CFG.valid_id, the current image block and mask are stored in the valid_images and valid_masks lists, # and the start and end coordinates of the current block are stored in the valid_xyxys list; otherwise they are stored in the train_images and train_masks lists.
        for y1 in y1_list:
            for x1 in x1_list:
                y2 = y1 + CFG.tile_size
                x2 = x1 + CFG.tile_size
                # xyxys.append((x1, y1, x2, y2))

                # The fragment selected as CFG.valid_id (fragment 1, 2 or 3) goes into the validation set
                if fragment_id == CFG.valid_id:
                    valid_images.append(image[y1:y2, x1:x2])
                    valid_masks.append(mask[y1:y2, x1:x2, None])

                    valid_xyxys.append([x1, y1, x2, y2])
                # the remaining fragments go into the training set: 2 fragments for training, 1 for validation
                else:
                    train_images.append(image[y1:y2, x1:x2])
                    train_masks.append(mask[y1:y2, x1:x2, None])

    return train_images, train_masks, valid_images, valid_masks, valid_xyxys


(
    train_images,
    train_masks,
    valid_images,
    valid_masks,
    valid_xyxys,
) = get_train_valid_dataset()

valid_xyxys = np.stack(valid_xyxys)

# ## dataset
import numpy as np
from torch.utils.data import DataLoader, Dataset
import cv2
import torch
import os
import albumentations as A
from albumentations.pytorch import ToTensorV2
from albumentations import ImageOnlyTransform


def get_transforms(data, cfg):
    if data == "train":
        aug = A.Compose(cfg.train_aug_list)
    elif data == "valid":
        aug = A.Compose(cfg.valid_aug_list)

    # print(aug)
    return aug


class CustomDataset(Dataset):
    def __init__(self, images, cfg, labels=None, transform=None):
        self.images = images
        self.cfg = cfg
        self.labels = labels
        self.transform = transform

    def __len__(self):
        # return len(self.df)
        return len(self.images)

    def __getitem__(self, idx):
        image = self.images[idx]
        label = self.labels[idx]

        if self.transform:
            data = self.transform(image=image, mask=label)
            image = data["image"]
            label = data["mask"]

        return image, label


# # Training set and validation set data processing
train_dataset = CustomDataset(
    train_images,
    CFG,
    labels=train_masks,
    transform=get_transforms(data="train", cfg=CFG),
)
valid_dataset = CustomDataset(
    valid_images,
    CFG,
    labels=valid_masks,
    transform=get_transforms(data="valid", cfg=CFG),
)

train_loader = DataLoader(
    train_dataset,
    batch_size=CFG.train_batch_size,
    shuffle=True,
    num_workers=CFG.num_workers,
    pin_memory=True,
    drop_last=True,
)
valid_loader = DataLoader(
    valid_dataset,
    batch_size=CFG.valid_batch_size,
    shuffle=False,
    num_workers=CFG.num_workers,
    pin_memory=True,
    drop_last=False,
)

train_dataset[0][0].shape

plot_dataset = CustomDataset(train_images, CFG, labels=train_masks)

transform = CFG.train_aug_list
transform = A.Compose(
    [t for t in transform if not isinstance(t, (A.Normalize, ToTensorV2))]
)

plot_count = 0
for i in range(1000):
    image, mask = plot_dataset[i]
    data = transform(image=image, mask=mask)
    aug_image = data["image"]
    aug_mask = data["mask"]

    if mask.sum() == 0:
        continue

    fig, axes = plt.subplots(1, 4, figsize=(15, 8))
    axes[0].imshow(image[..., 0], cmap="gray")
    axes[1].imshow(mask, cmap="gray")
    axes[2].imshow(aug_image[..., 0], cmap="gray")
    axes[3].imshow(aug_mask, cmap="gray")
    plt.savefig(CFG.figures_dir + f"aug_fold_{CFG.valid_id}_{plot_count}.png")

    plot_count += 1
    if plot_count == 5:
        break

del plot_dataset
gc.collect()

# ## model
# **Unet convolutional network construction**
# This code initializes the image segmentation model using the Unet implementation from the segmentation_models_pytorch library.


class CustomModel(nn.Module):
    def __init__(self, cfg, weight=None):
        super().__init__()
        self.cfg = cfg

        # The encoder_name parameter is used to specify which pre-trained encoder model to use. Here, a pre-trained model is used, so the encoder_name parameter is specified.
        # The advantage of this is that the feature extraction capability of an existing pre-trained model can be reused to speed up training and improve the accuracy of the model. Different pre-trained models can also be selected according to actual needs to achieve better results.
        # We can use the pre-trained weights of an image classification model to initialize the convolutional layers of the UNet encoder, thus improving the performance and generalization ability of the model.
        # encoder_name: the name of the pre-trained encoder to use
        # encoder_weights: the weights of the pre-trained model; usually imagenet is used
        # in_channels: the number of channels of the input image, e.g. 3 for RGB images
        # classes: the number of classes, i.e. the number of pixel classes for the segmentation task
        # activation: the activation function, usually None (no activation) or sigmoid (output between 0 and 1)
        self.encoder = smp.Unet(
            encoder_name=cfg.backbone,
            encoder_weights=weight,
            in_channels=cfg.in_chans,
            classes=cfg.target_size,
            activation=None,
        )

    # After initialization, an input image can be fed into the model for forward propagation to obtain the corresponding segmentation result.
    def forward(self, image):
        output = self.encoder(image)
        # output = output.squeeze(-1)
        return output


# weight="imagenet"
def build_model(cfg, weight="ssl"):
    print("model_name", cfg.model_name)
    print("backbone", cfg.backbone)

    model = CustomModel(cfg, weight)

    return model


# ## scheduler
# https://www.kaggle.com/code/underwearfitting/single-fold-training-of-resnet200d-lb0-965
# The main role of this scheduler is to gradually increase the learning rate at the beginning of training to help the model converge faster. It is a subclass of GradualWarmupScheduler with an after_scheduler parameter for switching to another learning rate schedule once the warm-up is over.
# In each epoch, the get_lr() method returns the current learning rate.
# At the beginning of training, the learning rate is gradually increased from the initial value to the maximum value, after which the learning rate returned by after_scheduler (another learning rate schedule) is used. If after_scheduler is not provided, the current learning rate continues to be used.
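# As a quick illustration (added here, not part of the original notebook), the warm-up
# formula implemented below can be evaluated by hand. Assuming multiplier=10,
# total_epoch=1 and base_lr = CFG.lr = 1e-5:
def warmup_lr_sketch(epoch, base_lr=1e-5, multiplier=10, total_epoch=1):
    # linear ramp: base_lr at epoch 0, base_lr * multiplier at epoch total_epoch
    return base_lr * ((multiplier - 1.0) * epoch / total_epoch + 1.0)


print([warmup_lr_sketch(e) for e in (0, 1)])  # [1e-05, 0.0001] -> then the cosine schedule takes over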
# import torch.nn as nn
import torch
import math
import time
import numpy as np
import torch
from torch.optim.lr_scheduler import (
    CosineAnnealingWarmRestarts,
    CosineAnnealingLR,
    ReduceLROnPlateau,
)
from warmup_scheduler import GradualWarmupScheduler


class GradualWarmupSchedulerV2(GradualWarmupScheduler):
    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
        super(GradualWarmupSchedulerV2, self).__init__(
            optimizer, multiplier, total_epoch, after_scheduler
        )

    def get_lr(self):
        if self.last_epoch > self.total_epoch:
            if self.after_scheduler:
                if not self.finished:
                    self.after_scheduler.base_lrs = [
                        base_lr * self.multiplier for base_lr in self.base_lrs
                    ]
                    self.finished = True
                return self.after_scheduler.get_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]
        if self.multiplier == 1.0:
            return [
                base_lr * (float(self.last_epoch) / self.total_epoch)
                for base_lr in self.base_lrs
            ]
        else:
            return [
                base_lr
                * ((self.multiplier - 1.0) * self.last_epoch / self.total_epoch + 1.0)
                for base_lr in self.base_lrs
            ]


# This function builds the learning rate scheduler; its inputs are the configuration (cfg) and the optimizer. Two schedulers are combined: CosineAnnealingLR and GradualWarmupSchedulerV2.
# CosineAnnealingLR is a cosine annealing scheduler that gradually reduces the learning rate during training so that the model converges more stably.
# GradualWarmupSchedulerV2 is a learning rate warm-up scheduler that gradually increases the learning rate at the beginning of training, which keeps the model from settling into a poor local optimum early on.
# First a CosineAnnealingLR scheduler is defined, with the total number of training epochs (cfg.epochs) as its cycle and a minimum learning rate of 1e-7.
# It is then passed as the after_scheduler parameter of GradualWarmupSchedulerV2, together with the optimizer; multiplier is the learning rate multiplier during warm-up and total_epoch is the number of warm-up epochs.
# Finally, the GradualWarmupSchedulerV2 scheduler is returned.
def get_scheduler(cfg, optimizer):
    scheduler_cosine = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, cfg.epochs, eta_min=1e-7
    )
    scheduler = GradualWarmupSchedulerV2(
        optimizer, multiplier=10, total_epoch=1, after_scheduler=scheduler_cosine
    )

    return scheduler


def scheduler_step(scheduler, avg_val_loss, epoch):
    scheduler.step(epoch)


# This line uses the AdamW optimizer, which takes the parameters of the model as input and uses the learning rate lr from CFG. AdamW is a variant of the Adam optimizer used to update the model parameters when training a neural network.
# It updates the parameters based on running estimates of the gradients to minimize the loss function. AdamW also applies a regularization method called weight decay to prevent the model from overfitting the training data.
model = build_model(CFG)
model.to(device)

optimizer = AdamW(model.parameters(), lr=CFG.lr)
scheduler = get_scheduler(CFG, optimizer)

# ## loss
# Dice Loss is a commonly used loss function for image segmentation; it is derived from the Dice coefficient and is often combined with the binary cross-entropy loss.
# The Dice coefficient is used to evaluate the similarity between the predicted segmentation and the true labels; it is defined as twice the size of the intersection of the two sets divided by the sum of their sizes.
# Dice Loss is defined as 1 minus the Dice coefficient, i.e. Dice Loss = 1 - (2 * (intersection of the predicted segmentation and the true labels) / (size of the predicted segmentation + size of the true labels)).
# During training, the optimizer minimizes the Dice Loss, thereby maximizing the similarity between the predicted segmentation and the true labels.
# BCELoss is the Binary Cross Entropy loss function, usually used in binary classification problems.
# It measures the performance of the model by the difference between the model's predictions and the true labels.
# In the binary cross entropy loss function, each sample's true label is 0 or 1, and the prediction is a probability value between 0 and 1.
DiceLoss = smp.losses.DiceLoss(mode="binary")
BCELoss = smp.losses.SoftBCEWithLogitsLoss()

alpha = 0.5
beta = 1 - alpha
# This loss function uses the Tversky index, a metric for evaluating the degree of similarity between two sets. Here
# it is used to assess the similarity between the model's predictions and the true labels.
# alpha and beta are hyperparameters that adjust the relative weights of false positives and false negatives in the Tversky index. This loss function is used for the binary classification problem.
# The log_loss parameter determines whether the logarithmic loss is used.
# smp refers to the Segmentation Models PyTorch library, a deep learning library for image segmentation tasks.
TverskyLoss = smp.losses.TverskyLoss(
    mode="binary", log_loss=False, alpha=alpha, beta=beta
)


def criterion(y_pred, y_true):
    # return 0.5 * BCELoss(y_pred, y_true) + 0.5 * DiceLoss(y_pred, y_true)
    return BCELoss(y_pred, y_true)
    # return 0.5 * BCELoss(y_pred, y_true) + 0.5 * TverskyLoss(y_pred, y_true)


# ## train, val
def train_fn(train_loader, model, criterion, optimizer, device):
    model.train()

    """
    GradScaler is a tool in PyTorch for mixed-precision training that automatically scales
    gradient values to avoid gradient underflow when computing at FP16 precision.
    In the configuration, GradScaler is enabled if AMP (Automatic Mixed Precision) is used.
    """
    scaler = GradScaler(enabled=CFG.use_amp)

    """
    This is a utility class for computing the average loss.
    When training a neural network, the loss is computed for each batch and summed into a
    total loss value. To get the average loss, the total needs to be divided by the number
    of samples seen.
    The AverageMeter class encapsulates this process, making it easy to record the running
    total and count and to compute the average loss.
    After each batch, the total and count can be updated by calling the update() method of
    this class, and the average loss can then be read from the avg attribute of this class.
""" losses = AverageMeter() for step, (images, labels) in tqdm( enumerate(train_loader), total=len(train_loader) ): images = images.to(device) labels = labels.to(device) batch_size = labels.size(0) with autocast(CFG.use_amp): y_preds = model(images) loss = criterion(y_preds, labels) # Backpropagation and gradient calculation for the loss function # scaler.scale(loss) is gradient scaling using PyTorch's GradScaler, this is to prevent gradient explosion or gradient disappearance during backpropagation. backward() is to backpropagate the parameters of the model and calculate the gradient. losses.update(loss.item(), batch_size) scaler.scale(loss).backward() # This code is used to perform gradient cropping. During the training of a deep learning model, the gradient values may become very large, which can lead to instability of the model. To avoid this, we can use the gradient cropping method to keep the gradient value within an acceptable range. grad_norm = torch.nn.utils.clip_grad_norm_( model.parameters(), CFG.max_grad_norm ) # Specifically, scaler.step(optimizer) is the optimizer's gradient update on mixed precision # scaler.update() is used to update the scaling factor inside the scaler. # optimizer.zero_grad() is used to clear the gradient information in the optimizer so that the gradient can be recalculated in the next iteration. scaler.step(optimizer) scaler.update() optimizer.zero_grad() return losses.avg def valid_fn(valid_loader, model, criterion, device, valid_xyxys, valid_mask_gt): mask_pred = np.zeros(valid_mask_gt.shape) mask_count = np.zeros(valid_mask_gt.shape) model.eval() losses = AverageMeter() for step, (images, labels) in tqdm( enumerate(valid_loader), total=len(valid_loader) ): images = images.to(device) labels = labels.to(device) batch_size = labels.size(0) with torch.no_grad(): y_preds = model(images) loss = criterion(y_preds, labels) losses.update(loss.item(), batch_size) """ This code is used to generate the prediction masks, predicting the mask value for each pixel point based on the model's output, and then assigning it to the corresponding region of the mask. where y_preds is the output of the model, which is mapped to between [0,1] as the mask value by the sigmoid function; valid_xyxys is the coordinate and size information of the validation set images, and start_idx and end_idx are the corresponding start and end indexes of the currently processed batch in valid_xyxys; mask_pred and mask_count are the generated masks and the number of times each pixel point is assigned. Finally, the mask value is divided by the number of assignments to get the average mask value of each pixel point. 
""" # make whole mask y_preds = torch.sigmoid(y_preds).to("cpu").numpy() start_idx = step * CFG.valid_batch_size end_idx = start_idx + batch_size for i, (x1, y1, x2, y2) in enumerate(valid_xyxys[start_idx:end_idx]): mask_pred[y1:y2, x1:x2] += y_preds[i].squeeze(0) mask_count[y1:y2, x1:x2] += np.ones((CFG.tile_size, CFG.tile_size)) print(f"mask_count_min: {mask_count.min()}") mask_pred /= mask_count return losses.avg, mask_pred # ## metrics from sklearn.metrics import fbeta_score def fbeta_numpy(targets, preds, beta=0.5, smooth=1e-5): """ https://www.kaggle.com/competitions/vesuvius-challenge-ink-detection/discussion/397288 """ y_true_count = targets.sum() ctp = preds[targets == 1].sum() cfp = preds[targets == 0].sum() beta_squared = beta * beta c_precision = ctp / (ctp + cfp + smooth) c_recall = ctp / (y_true_count + smooth) dice = ( (1 + beta_squared) * (c_precision * c_recall) / (beta_squared * c_precision + c_recall + smooth) ) return dice # The purpose of this code is to calculate the F-beta score between a given mask and a predicted mask and return the best threshold and the best F-beta score. # It calculates the F-beta score by spreading the mask and the prediction mask into a one-dimensional array and looping over a series of thresholds. # The optimal threshold is the threshold that maximizes the F-beta score, and the best F-beta score is the F-beta score calculated at the optimal threshold. def calc_fbeta(mask, mask_pred): mask = mask.astype(int).flatten() mask_pred = mask_pred.flatten() best_th = 0 best_dice = 0 for th in np.array(range(10, 50 + 1, 5)) / 100: # dice = fbeta_score(mask, (mask_pred >= th).astype(int), beta=0.5) dice = fbeta_numpy(mask, (mask_pred >= th).astype(int), beta=0.5) print(f"th: {th}, fbeta: {dice}") if dice > best_dice: best_dice = dice best_th = th Logger.info(f"best_th: {best_th}, fbeta: {best_dice}") return best_dice, best_th # This code defines a function called calc_cv that takes two arguments: mask_gt and mask_pred. These two arguments represent the true mask (i.e., ground truth) and the model prediction mask, respectively. # The function internally calls the calc_fbeta function to calculate the best Dice coefficient and the best threshold, and returns them as a tuple. # The Dice coefficient is a commonly used measure of the similarity of two masks and takes values from 0 to 1, with higher values indicating higher similarity. The threshold is a parameter needed to convert the mask into a binarized image. def calc_cv(mask_gt, mask_pred): best_dice, best_th = calc_fbeta(mask_gt, mask_pred) return best_dice, best_th # ## main fragment_id = CFG.valid_id valid_mask_gt = cv2.imread( CFG.comp_dataset_path + f"train/{fragment_id}/inklabels.png", 0 ) valid_mask_gt = valid_mask_gt / 255 """ The purpose of this code is to zero-fill the valid_mask_gt array so that the number of rows is a multiple of CFG.tile_size. Specifically, pad0 and pad1 represent the number of zeros to be filled in the first and second dimensions of the valid_mask_gt array, respectively, so that both valid_mask_gt.shape[0] and valid_mask_gt.shape[1] are multiples of CFG.tile_size. 
""" pad0 = CFG.tile_size - valid_mask_gt.shape[0] % CFG.tile_size pad1 = CFG.tile_size - valid_mask_gt.shape[1] % CFG.tile_size valid_mask_gt = np.pad(valid_mask_gt, [(0, pad0), (0, pad1)], constant_values=0) """ 这行代码使用了NumPy中的`pad`函数,将`valid_mask_gt`数组在两个维度上进行了填充, 以便与另一个数组进行操作时具有相同的形状。具体来说,`[(0, pad0), (0, pad1)]`表示在第一个维度上不进行填充(前面填0个,后面填0个), 在第二个维度上填充`pad1`个0在后面,填充`pad0`个0在前面。 这样做的目的是将`valid_mask_gt`数组的形状扩展到与另一个数组相同,以便进行一些操作,例如相加、相减等。 """ fold = CFG.valid_id """ This code initializes the variables for the best score based on the direction of the evaluation metric. If the evaluation indicator is of the "minimization" type, the initial value should be positive infinity (np.inf); If the evaluation indicator is of the "maximize" type, then the initial value should be negative one (-1). This ensures that the value of the best score can be updated if a better score emerges during the subsequent evaluation. """ if CFG.metric_direction == "minimize": best_score = np.inf elif CFG.metric_direction == "maximize": best_score = -1 best_loss = np.inf for epoch in range(CFG.epochs): start_time = time.time() # train avg_loss = train_fn(train_loader, model, criterion, optimizer, device) # eval avg_val_loss, mask_pred = valid_fn( valid_loader, model, criterion, device, valid_xyxys, valid_mask_gt ) scheduler_step(scheduler, avg_val_loss, epoch) best_dice, best_th = calc_cv(valid_mask_gt, mask_pred) # score = avg_val_loss score = best_dice elapsed = time.time() - start_time Logger.info( f"Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} time: {elapsed:.0f}s" ) # Logger.info(f'Epoch {epoch+1} - avgScore: {avg_score:.4f}') Logger.info(f"Epoch {epoch+1} - avgScore: {score:.4f}") """ This code is used to update the best score based on the direction of the evaluation metric (whether it is minimized or maximized). If the direction of the evaluation metric is minimize, the best score is updated to the new score if the new score is lower than the current best score. If the direction of the evaluation metric is maximization, the best score is updated to the new score if the new score is higher than the current best score. This allows tracking the best model during training and using that model during evaluation. """ if CFG.metric_direction == "minimize": update_best = score < best_score elif CFG.metric_direction == "maximize": update_best = score > best_score if update_best: best_loss = avg_val_loss best_score = score Logger.info(f"Epoch {epoch+1} - Save Best Score: {best_score:.4f} Model") Logger.info(f"Epoch {epoch+1} - Save Best Loss: {best_loss:.4f} Model") torch.save( {"model": model.state_dict(), "preds": mask_pred}, CFG.model_dir + f"{CFG.model_name}_fold{fold}_best.pth", ) check_point = torch.load( CFG.model_dir + f"{CFG.model_name}_fold{fold}_{CFG.inf_weight}.pth", map_location=torch.device("cpu"), ) mask_pred = check_point["preds"] best_dice, best_th = calc_fbeta(valid_mask_gt, mask_pred) fig, axes = plt.subplots(1, 3, figsize=(15, 8)) axes[0].imshow(valid_mask_gt) axes[1].imshow(mask_pred) axes[2].imshow((mask_pred >= best_th).astype(int)) plt.hist(mask_pred.flatten(), bins=20)
false
0
10,420
0
10,508
10,420
129977265
# ## Background Info
# In sentiment analysis using models like RoBERTa, the predicted probabilities for different sentiment classes (such as neg, neu, and pos) represent the model's confidence or likelihood for each sentiment class, and they are typically output as normalized values between 0 and 1.
# The probabilities for neg, neu, and pos are relative to each other and sum to 1. For example, if the predicted probabilities are [0.2, 0.5, 0.3] for neg, neu, and pos respectively, the model assigns a 20% probability to negative sentiment, 50% probability to neutral sentiment, and 30% probability to positive sentiment for the given input.

# ## Import Necessary Modules
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # for the graphs
import seaborn as sns

plt.style.use("ggplot")
import nltk

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# ## Read In Data
# Read the data into a dataframe
df = pd.read_csv("../input/updated-movie-reviews-dataset/New_Audience.csv")
df.head()

df["reviewContent"].values[0]

print(df.shape)  # 1100 rows, 7 columns

# Keep only the first 550 rows; can be changed depending on computing power
df = df.head(550)
df.head()

# ## Quick Exploratory Data Analysis (EDA)
ax = (
    df["reviewRating"]
    .value_counts()
    .sort_index()
    .plot(kind="line", title="Distribution of Audience Review Ratings", figsize=(10, 5))
)
ax.set_xlabel("Review Ratings")
ax.set_ylabel("Number of Reviews")
ax.set_xticks(range(1, 11, 1))
plt.show()

# ## Basic NLTK
example = df["reviewContent"][50]
print(example)

tokens = nltk.word_tokenize(example)
tokens[:10]

tagged = nltk.pos_tag(tokens)
tagged[:10]

entities = nltk.ne_chunk(tagged)
sliced_entities = entities[:10]
print(sliced_entities)  # print the first chunked subtrees
# entities.pprint()

# ## Sentiment Analysis Version 1: Using VADER
# VADER (Valence Aware Dictionary and sEntiment Reasoner) - Bag of words approach
# > Using NLTK's `SentimentIntensityAnalyzer` to get the neg/neu/pos scores of the text.
# * This uses a "bag of words" approach:
#     1. Stop words are removed (e.g. and, the) - words used only for structure
#     2. Each word is scored and the scores are combined into a total score.
# *Note: this does not capture relationships between words.
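# A toy illustration of the "bag of words" idea described above, with a
# hypothetical mini-lexicon (this is not VADER's actual dictionary): each word
# gets an independent valence score and the scores are simply combined.
toy_lexicon = {"fun": 0.6, "great": 0.8, "lonely": -0.5, "boring": -0.6}


def toy_polarity(text):
    words = [w.strip(".,!?").lower() for w in text.split()]
    # words missing from the lexicon contribute 0, like structural stop words
    return sum(toy_lexicon.get(w, 0.0) for w in words) / max(len(words), 1)


print(toy_polarity("League of Legends is so fun"))  # small positive score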
from nltk.sentiment import SentimentIntensityAnalyzer
from tqdm.notebook import tqdm

sia = SentimentIntensityAnalyzer()

sia.polarity_scores("You look lonely, I can fix that!")

sia.polarity_scores("League of Legends is so fun xd")

sia.polarity_scores(example)

# Run the polarity score on the entire dataset
result = {}
for i, row in tqdm(df.iterrows(), total=len(df)):
    text = row["reviewContent"]
    myid = row["ID"]
    result[myid] = sia.polarity_scores(text)

result_10 = dict(list(result.items())[:10])
result_10

vaders = pd.DataFrame(result).T
vaders = vaders.reset_index().rename(columns={"index": "ID"})
vaders = vaders.merge(df, how="left")

# Now we have sentiment scores and metadata
vaders.head()

ax = sns.barplot(data=vaders, x="reviewRating", y="compound")
ax.set_title("Compound Score by Audience Movie Reviews")
plt.show()

fig, axs = plt.subplots(1, 3, figsize=(15, 3))
sns.barplot(data=vaders, x="reviewRating", y="pos", ax=axs[0])
sns.barplot(data=vaders, x="reviewRating", y="neu", ax=axs[1])
sns.barplot(data=vaders, x="reviewRating", y="neg", ax=axs[2])
axs[0].set_title("Positive")
axs[1].set_title("Neutral")
axs[2].set_title("Negative")
plt.tight_layout()
plt.show()

# ## Sentiment Analysis Version 2: Using RoBERTa Pretrained Model
# * Use a model trained on a large corpus of data.
# * Transformer models account for the words themselves but also for their context relative to other words.
# Facebook AI's RoBERTa model was proposed in *RoBERTa: A Robustly Optimized BERT Pretraining Approach* by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. It is based on Google's BERT model released in 2018.
# The VADER model only looks at each word and scores it individually. But human language depends a lot on context: a positive sentence may contain negative words used sarcastically or in relation to other words, so VADER cannot pick up on the relationships between words. More recently, transformer-based deep learning models have become very popular because they can pick up on that context.
# We are going to get the RoBERTa model from HuggingFace. Link: https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment
# *Note: The RoBERTa model, like any transformer model, is optimised to run on a GPU.
# Notebook options -> Accelerator -> GPU

from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax

MODEL = "cardiffnlp/twitter-roberta-base-sentiment"  # Model from HuggingFace
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

# VADER results on example
print(example)
sia.polarity_scores(example)

# Run for RoBERTa Model
encoded_text = tokenizer(example, return_tensors="pt")
output = model(**encoded_text)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
scores_dict = {
    "roberta_neg": scores[0],
    "roberta_neu": scores[1],
    "roberta_pos": scores[2],
}
print(scores_dict)


def polarity_scores_roberta(example):
    encoded_text = tokenizer(example, return_tensors="pt")
    output = model(**encoded_text)
    scores = output[0][0].detach().numpy()
    scores = softmax(scores)
    scores_dict = {
        "roberta_neg": scores[0],
        "roberta_neu": scores[1],
        "roberta_pos": scores[2],
    }
    return scores_dict


# Run the polarity score on the entire dataset, but this time for the RoBERTa model
result = {}
for i, row in tqdm(df.iterrows(), total=len(df)):
    try:
        # Some reviews are too long for the RoBERTa model and raise a RuntimeError; we skip those.
        text = row["reviewContent"]
        myid = row["ID"]
        vader_result = sia.polarity_scores(text)
        vader_result_rename = {}
        for key, value in vader_result.items():
            vader_result_rename[f"vader_{key}"] = value
        roberta_result = polarity_scores_roberta(text)
        both = {**vader_result_rename, **roberta_result}  # combine the two dicts
        result[myid] = both
    except RuntimeError:
        print(f"Broke for id {myid}")

both  # show the last combined result as a sanity check

results_df = pd.DataFrame(result).T
results_df = results_df.reset_index().rename(columns={"index": "ID"})
results_df = results_df.merge(df, how="left")
results_df.head()

# ## Compare and Data Visualisation
results_df.columns

sns.pairplot(
    data=results_df,
    vars=[
        "vader_neg",
        "vader_neu",
        "vader_pos",
        "roberta_neg",
        "roberta_neu",
        "roberta_pos",
    ],
    hue="reviewRating",
    palette="tab10",
)
plt.show()

# ## Review Examples:
# * Positive 1/10 and Negative 10/10 Reviews
# Let's look at some examples where the model's score and the review score differ the most.
results_df

# A movie review the model scores as positive although the reviewer gave it a 1/10 or 2/10. What insight can we gain from this?
results_df.query("1 <= reviewRating <= 2").sort_values("roberta_pos", ascending=False)[
    ["ID", "roberta_neg", "roberta_neu", "roberta_pos", "reviewRating", "reviewContent"]
].values[0]

specific_row = results_df.loc[results_df["ID"] == 149]
print(specific_row)

# A movie review the model scores as negative although the reviewer gave it a 10/10. What insight can we gain from this?
results_df.query("reviewRating == 10").sort_values("roberta_neg", ascending=False)[
    ["ID", "roberta_neg", "roberta_neu", "roberta_pos", "reviewRating", "reviewContent"]
].values[0]
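# A worked example of the softmax normalization described in the Background
# Info section; the logit values here are made up for illustration only.
logits = np.array([-0.4, 0.5, 0.1])
probs = np.exp(logits) / np.exp(logits).sum()
print(probs, probs.sum())  # roughly [0.20, 0.48, 0.32], summing to 1.0
print(softmax(logits))  # scipy's softmax (used above) gives the same result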
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/977/129977265.ipynb
null
null
[{"Id": 129977265, "ScriptId": 38604764, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15133337, "CreationDate": "05/17/2023 21:42:12", "VersionNumber": 3.0, "Title": "Sentiment Analysis CSS2", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 227.0, "LinesInsertedFromPrevious": 138.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 89.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,510
0
2,510
2,510
129977789
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from statsmodels.formula.api import ols
from sklearn.linear_model import LinearRegression
import matplotlib as mpl
from statsmodels.api import qqplot
import statsmodels.api as sm
from statsmodels.formula.api import glm
from statsmodels.formula.api import logit
from statsmodels.stats.outliers_influence import variance_inflation_factor

dirname = "/kaggle/input/economic-games-ba2/"

# figure settings
# Reset rcParams to default values
mpl.rcParams.update(mpl.rcParamsDefault)
# global seaborn settings
sns.set(style="ticks", palette="muted", font_scale=1.2, context="talk")
mpl.rcParams["lines.linewidth"] = 2
mpl.rcParams["lines.markersize"] = 10
mpl.rcParams["font.size"] = 16

all_avg_120 = pd.read_csv(dirname + "avg_120FA.csv")
both75_avg_102 = pd.read_csv(dirname + "both75avg102FA.csv")
both75_unfold_102 = pd.read_csv(dirname + "both75unfold102wFA.csv")
dg75_avg_113 = pd.read_csv(dirname + "DG75avg113FA.csv")
dg75_unfold_113 = pd.read_csv(dirname + "DG75unfold113wFA.csv")
tg75_avg_106 = pd.read_csv(dirname + "TG75avg106FA.csv")
tg75_unfold_106 = pd.read_csv(dirname + "TG75unfold106wFA.csv")

sl_all_avg = all_avg_120.iloc[:, 4:].reset_index(drop=True)
sl_both75_avg = both75_avg_102.iloc[:, 4:].reset_index(drop=True)
sl_both75_unfold = both75_unfold_102.iloc[:, 2:].reset_index(drop=True)
sl_dg75_avg = dg75_avg_113.iloc[:, 4:].reset_index(drop=True)
sl_dg75_unfold = dg75_unfold_113.iloc[:, 2:].reset_index(drop=True)
sl_tg75_avg = tg75_avg_106.iloc[:, 4:].reset_index(drop=True)
sl_tg75_unfold = tg75_unfold_106.iloc[:, 2:].reset_index(drop=True)

# add BLUP predictors from R
# columns to match between the two dataframes: left_keys are in sl_..75_avg, right_keys are in the BLUP tables
left_keys = [
    "Age",
    "Betrayal",
    "Anger",
    "Sadness",
    "Disgust",
    "Surprise",
    "Cog_Motivate",
    "Cog_Reasons",
    "Cog_Defend",
    "Cog_ToM",
    "IRIpt",
    "IRIfs",
    "IRIec",
    "IRIpd",
]
right_keys = [
    "age",
    "betrayal",
    "anger",
    "sadness",
    "disgust",
    "surprise",
    "motivation",
    "reason",
    "defend",
    "perspective",
    "IRI_pt",
    "IRI_fs",
    "IRI_ec",
    "IRI_pd",
]

# import each df
blup_df_both = pd.read_csv(dirname + "BLUP_predictors_both75_raw.csv")
blup_df_dg = pd.read_csv(dirname + "BLUP_predictors_dg75_raw.csv")
blup_df_tg = pd.read_csv(dirname + "BLUP_predictors_tg75_raw.csv")

# columns we want to keep in the dataset
blup_cols = [
    col
    for col in blup_df_both.columns
    if not col.endswith(".x") and ("c." in col or col in ["female.y", "british.y"])
]
sl_cols = list(sl_both75_avg.columns)
sl_cols.extend(blup_cols)

# merge the two dataframes on the given keys; raise an error if one row matches multiple rows
sl_both75_avg = sl_both75_avg.merge(
    blup_df_both, left_on=left_keys, right_on=right_keys, validate="one_to_one"
)
# extract the wanted columns
sl_both75_avg = sl_both75_avg[sl_cols]
# rename cols ("." -> "_", drop the "_y" merge suffix); regex=False so "." is treated literally
sl_both75_avg.columns = sl_both75_avg.columns.str.replace(
    ".", "_", regex=False
).str.replace("_y", "", regex=False)

# repeat for dg
sl_dg75_avg = sl_dg75_avg.merge(
    blup_df_dg, left_on=left_keys, right_on=right_keys, validate="one_to_one"
)
sl_dg75_avg = sl_dg75_avg[sl_cols]
sl_dg75_avg.columns = sl_dg75_avg.columns.str.replace(
    ".", "_", regex=False
).str.replace("_y", "", regex=False)

# repeat for tg
sl_tg75_avg = sl_tg75_avg.merge(
    blup_df_tg, left_on=left_keys, right_on=right_keys, validate="one_to_one"
)
sl_tg75_avg = sl_tg75_avg[sl_cols]
sl_tg75_avg.columns = sl_tg75_avg.columns.str.replace(
    ".", "_", regex=False
).str.replace("_y", "", regex=False)

# cut the cols before subNum (original approach)
dataframes = [
    sl_all_avg,
    sl_both75_avg,
    sl_both75_unfold,
    sl_dg75_avg,
    sl_dg75_unfold,
    sl_tg75_avg,
    sl_tg75_unfold,
]
names = [
    "sl_all_avg",
    "sl_both75_avg",
    "sl_both75_unfold",
    "sl_dg75_avg",
    "sl_dg75_unfold",
    "sl_tg75_avg",
    "sl_tg75_unfold",
]

tg_multi = 3
for df, name in zip(dataframes, names):
    df["trustworthiness_avg"] = (
        df["trstee_ST1"] / (tg_multi * 2)
        + df["trstee_ST2"] / (tg_multi * 4)
        + df["trstee_ST3"] / (tg_multi * 6)
        + df["trstee_ST4"] / (tg_multi * 8)
        + df["trstee_ST5"] / (tg_multi * 10)
    ) / 5
    filename = f"{name}.csv"
    df.to_csv(
        filename, index=False
    )  # Save the dataframe as a CSV file using the file name

sl_tg75_avg_blups = pd.read_csv("sl_tg75_avg.csv")
sl_dg75_avg_blups = pd.read_csv("sl_dg75_avg.csv")
sl_both75_avg_blups = pd.read_csv("sl_both75_avg.csv")

## mean center
# the same centering is applied to tg75 and dg75, so it is written as a loop
# instead of repeated per-column assignments
center_cols = {
    "trustworthiness_avg": "trustworthiness_c",
    "BIS11ATT": "BIS11ATT_c",
    "BIS11MT": "BIS11MT_c",
    "BIS11NP": "BIS11NP_c",
    "trustAttitude": "trustAttitude_c",
    "riskTaking": "riskTaking_c",
    "TG_exptRatio": "TG_exptRatio_c",
    "TG_exptRatioAdj": "TG_exptRatioAdj_c",
}
for frame in (sl_tg75_avg_blups, sl_dg75_avg_blups):
    for src, dst in center_cols.items():
        frame[dst] = frame[src] - frame[src].mean()

# RT log transformation
sl_tg75_avg_blups["log_TG_trustorRT"] = np.log(sl_tg75_avg_blups["TG_trustorRT"])
sl_tg75_avg_blups["log_DG_dictatorRT"] = np.log(sl_tg75_avg_blups["DG_dictatorRT"])
sl_tg75_avg_blups["log_TG_expectRT"] = np.log(sl_tg75_avg_blups["TG_expectRT"])
sl_tg75_avg_blups["log_TG_closeRT"] = np.log(sl_tg75_avg_blups["TG_closeRT"])
sl_tg75_avg_blups["log_DG_closeRT"] = np.log(sl_tg75_avg_blups["DG_closeRT"])

sns.regplot(x="IRIec", y="BLUP_c_betrayal", data=sl_tg75_avg_blups)
plt.xlabel("IRI ec")
plt.ylabel("BLUP betrayal")
plt.show()

# all IRI subscales as predictors of the betrayal BLUPs
betrayal_blups_vs_all_IRI = ols(
    "BLUP_c_betrayal ~ female + c_income + c_age + c_education + british + c_IRI_pt + c_IRI_fs + c_IRI_ec + c_IRI_pd",
    data=sl_tg75_avg_blups,
).fit()
betrayal_blups_vs_all_IRI.summary()
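# variance_inflation_factor is imported above but never used; here is a
# minimal follow-up sketch (assuming the centered IRI columns exist exactly as
# named in the regression formula above) that checks collinearity among the
# four IRI subscales before trusting the coefficients.
iri_cols = ["c_IRI_pt", "c_IRI_fs", "c_IRI_ec", "c_IRI_pd"]
X_vif = sm.add_constant(sl_tg75_avg_blups[iri_cols].dropna())
vifs = pd.Series(
    [variance_inflation_factor(X_vif.values, i) for i in range(X_vif.shape[1])],
    index=X_vif.columns,
)
print(vifs)  # rule of thumb: VIF above roughly 5-10 signals problematic collinearity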
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/977/129977789.ipynb
null
null
[{"Id": 129977789, "ScriptId": 38664122, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15152859, "CreationDate": "05/17/2023 21:50:08", "VersionNumber": 1.0, "Title": "Micro-Macro Model analysis economic games", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 148.0, "LinesInsertedFromPrevious": 148.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
3,251
0
3,251
3,251
129894779
<jupyter_start><jupyter_text>ntasset
Kaggle dataset identifier: ntasset
<jupyter_script>import pandas as pd
import numpy as np
import sys
from functools import reduce


def get_funda_df():
    csi_df = pd.read_csv("/kaggle/input/ntasset/test/companystockinfo.csv")
    nonfin_income_df = pd.read_csv("/kaggle/input/ntasset/test/incomestatement.csv")
    nonfin_bs_df = pd.read_csv("/kaggle/input/ntasset/test/balancesheet.csv")

    # Merge annual tables
    funda_df = pd.merge(
        nonfin_income_df, nonfin_bs_df, on=["companyid", "year"], how="inner"
    )
    funda_df = pd.merge(csi_df, funda_df, on=["companyid"], how="inner")
    return funda_df


def get_forecast_df():
    csi_df = pd.read_csv("/kaggle/input/ntasset/test/companystockinfo.csv")
    nonfin_forecastannual_df = pd.read_csv("/kaggle/input/ntasset/test/forecast.csv")
    fxrate_df = pd.read_csv("/kaggle/input/ntasset/test/fxrate.csv")

    # Merge forecast table
    forecast_df = pd.merge(
        csi_df, nonfin_forecastannual_df, on=["companyid"], how="inner"
    )
    return forecast_df


if __name__ == "__main__":
    funda_df = get_funda_df()
    forecast_df = get_forecast_df()
    funda_df = funda_df.sort_values(by=["companyid", "year"])
    forecast_df = forecast_df.sort_values(by=["companyid", "year"])
    print(funda_df)
    print(forecast_df)

output_df = pd.DataFrame()
output_df[["companyid", "latestfinyear"]] = funda_df[["companyid", "latestfinyear"]]
output_df = output_df.sort_values(by="companyid")
output_df = output_df.drop_duplicates(subset=["companyid"])
output_df = output_df.reset_index(drop=True)

# roe[fy-i] = 100*netprofit[fy-i] / (0.5*(totequity[fy-i] + totequity[fy-i-1]))
prev_row = None
roe_val = []
for _, r in funda_df.iterrows():
    # reset at the first row of each company so the previous company's equity is not used
    if prev_row is None or prev_row["companyid"] != r["companyid"]:
        roe_val.append(0)
    else:
        roe_val.append(
            100 * r["netprofit"] / (0.5 * (r["totequity"] + prev_row["totequity"]))
        )
    prev_row = r
funda_df["roe"] = roe_val

for i in range(4, -1, -1):
    if i == 0:
        output_df["roe fy"] = list(
            (
                funda_df.loc[funda_df["year"] == funda_df["latestfinyear"]].sort_values(
                    by=["companyid"]
                )["roe"]
            )
        )
    else:
        output_df["roe fy-" + str(i)] = list(
            (
                funda_df.loc[
                    funda_df["year"] == funda_df["latestfinyear"] - i
                ].sort_values(by=["companyid"])["roe"]
            )
        )

for i in range(1, 4):
    tmp_df = pd.DataFrame(
        forecast_df.loc[forecast_df["year"] == forecast_df["latestfinyear"] + i][
            ["companyid", "roe"]
        ]
    )
    tmp_df = tmp_df.rename(columns={"roe": "roe fy+" + str(i)})
    output_df = pd.merge(output_df, tmp_df, how="left")

output_df["5yr median roe"] = output_df.apply(
    lambda x: np.nanmedian(
        x[["roe fy", "roe fy-1", "roe fy-2", "roe fy-3", "roe fy-4"]]
    ),
    axis=1,
)
output_df

# Target Columns: ['roa fy-4', 'roa fy-3', 'roa fy-2', 'roa fy-1', 'roa fy']
# roa[fy-i] = 100*netprofit[fy-i] / (0.5*(totassets[fy-i] + totassets[fy-i-1]))
prev_row = None
roa_val = []
for _, r in funda_df.iterrows():
    # reset at the first row of each company so the previous company's assets are not used
    if prev_row is None or prev_row["companyid"] != r["companyid"]:
        roa_val.append(0)
    else:
        roa_val.append(
            100 * r["netprofit"] / (0.5 * (r["totassets"] + prev_row["totassets"]))
        )
    prev_row = r
funda_df["roa"] = roa_val
funda_df

# Target Columns: ['roa fy-4', 'roa fy-3', 'roa fy-2', 'roa fy-1', 'roa fy']
# roa[fy-i] = 100*netprofit[fy-i] / (0.5*(totassets[fy-i] + totassets[fy-i-1]))
for i in range(4, -1, -1):
    if i == 0:
        output_df["roa fy"] = list(
            (
                funda_df.loc[funda_df["year"] == funda_df["latestfinyear"]].sort_values(
                    by=["companyid"]
                )["roa"]
            )
        )
    else:
        output_df["roa fy-" + str(i)] = list(
            (
                funda_df.loc[
                    funda_df["year"] == funda_df["latestfinyear"] - i
                ].sort_values(by=["companyid"])["roa"]
            )
        )

# Target Columns: ['roa fy+1', 'roa fy+2', 'roa fy+3']
# Pulled directly from forecast.csv
for i in range(1, 4):
    tmp_df = pd.DataFrame(
        forecast_df.loc[forecast_df["year"] == forecast_df["latestfinyear"] + i][
            ["companyid", "roa"]
        ]
    )
    tmp_df = tmp_df.rename(columns={"roa": "roa fy+" + str(i)})
    output_df = pd.merge(output_df, tmp_df, how="left")

# Target Columns: ['5yr median roa']
# '5yr median roa' = numpy.nanmedian(['roa fy-4', 'roa fy-3', 'roa fy-2', 'roa fy-1', 'roa fy'])
output_df["5yr median roa"] = output_df.apply(
    lambda x: np.nanmedian(
        x[["roa fy", "roa fy-1", "roa fy-2", "roa fy-3", "roa fy-4"]]
    ),
    axis=1,
)
output_df

# Target Columns: ['netde fy']
# netde[fy] = (totaldebt[fy] - cashncashequiv[fy]) / totequity[fy]
funda_df["netde"] = (funda_df["totaldebt"] - funda_df["cashncashequiv"]) / funda_df[
    "totequity"
]
output_df["netde fy"] = list(
    (
        funda_df.loc[funda_df["year"] == funda_df["latestfinyear"]].sort_values(
            by=["companyid"]
        )["netde"]
    )
)
output_df

# Target Columns: ['revenue fy-4', 'revenue fy-3', 'revenue fy-2', 'revenue fy-1', 'revenue fy']
# Pulled directly from incomestatement.csv
for i in range(4, -1, -1):
    if i != 0:
        tmp_df = pd.DataFrame(
            funda_df.loc[funda_df["year"] == funda_df["latestfinyear"] - i][
                ["companyid", "revenue"]
            ]
        )
        tmp_df = tmp_df.rename(columns={"revenue": "revenue fy-" + str(i)})
        output_df = pd.merge(output_df, tmp_df, how="left")
    else:
        tmp_df = pd.DataFrame(
            funda_df.loc[funda_df["year"] == funda_df["latestfinyear"] - i][
                ["companyid", "revenue"]
            ]
        )
        tmp_df = tmp_df.rename(columns={"revenue": "revenue fy"})
        output_df = pd.merge(output_df, tmp_df, how="left")
output_df

# Target Columns: ['revenue fy+1', 'revenue fy+2', 'revenue fy+3']
# Pulled directly from forecast.csv; converting the currency from 'estcurr' to
# 'reportingcurr' (via fxrate.csv) is not implemented in this cell yet.
for i in range(1, 4):
    tmp_df = pd.DataFrame(
        (
            forecast_df.loc[forecast_df["year"] == forecast_df["latestfinyear"] + i][
                ["companyid", "revenue"]
            ]
        )
    )
    tmp_df = tmp_df.rename(columns={"revenue": "revenue fy+" + str(i)})
    output_df = pd.merge(output_df, tmp_df, how="left")
output_df

# Target Columns: ['revenue 2yr cagr', 'revenue 3yr cagr']
# revenue 2yr cagr = 100*[(revenue fy+2 / revenue fy)^(1/2) - 1]
# revenue 3yr cagr = 100*[(revenue fy+3 / revenue fy)^(1/3) - 1]
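# The notebook ends with the CAGR formulas but no implementation; a minimal
# sketch that follows the formulas stated in the comment above, using the
# revenue columns already created in output_df:
output_df["revenue 2yr cagr"] = 100 * (
    (output_df["revenue fy+2"] / output_df["revenue fy"]) ** 0.5 - 1
)
output_df["revenue 3yr cagr"] = 100 * (
    (output_df["revenue fy+3"] / output_df["revenue fy"]) ** (1 / 3) - 1
)
output_df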
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/894/129894779.ipynb
ntasset
tirapatrs
[{"Id": 129894779, "ScriptId": 38548904, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11676930, "CreationDate": "05/17/2023 09:06:25", "VersionNumber": 2.0, "Title": "NTAsset", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 126.0, "LinesInsertedFromPrevious": 70.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 56.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186305369, "KernelVersionId": 129894779, "SourceDatasetVersionId": 5699193}]
[{"Id": 5699193, "DatasetId": 3277112, "DatasourceVersionId": 5774852, "CreatorUserId": 11676930, "LicenseName": "Unknown", "CreationDate": "05/16/2023 14:18:19", "VersionNumber": 1.0, "Title": "ntasset", "Slug": "ntasset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3277112, "CreatorUserId": 11676930, "OwnerUserId": 11676930.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5699193.0, "CurrentDatasourceVersionId": 5774852.0, "ForumId": 3342800, "Type": 2, "CreationDate": "05/16/2023 14:18:19", "LastActivityDate": "05/16/2023", "TotalViews": 5, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 11676930, "UserName": "tirapatrs", "DisplayName": "Tirapatr S", "RegisterDate": "09/22/2022", "PerformanceTier": 0}]
false
5
2,338
0
2,356
2,338
129894036
<jupyter_start><jupyter_text>World Exports Value 2021
Kaggle dataset identifier: world-exports-value-2021
<jupyter_script># Step1: Necessary imports
import statistics
import pandas as pd
import numpy as np
import matplotlib as mpl
import scipy as scipy
import seaborn as sns
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
import matplotlib.pyplot as plt

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# Step2: Loading the data
# Change the csv file to analyze another dataset
# (explicit dtypes could be passed to read_csv for memory optimization and faster loading)
data = pd.read_csv(
    "/kaggle/input/world-exports-value-2021/World-Exports-Value-2021--Click-to-Select-a-Product.csv",
    error_bad_lines=False,
)

data.shape

data.info()

# ***Standard deviation***
std = np.std(data)
print(std)

# ***Coefficient of Variation***
cv = np.std(data) / np.mean(data)
print(cv)

# ***Variance***
var_full = np.var(data)
print(var_full)

# # Describing the data
# Step3: Describing the data
data.describe()

# Step3: Describing the data - finding the mode [most frequent]
data.mode()

# # **Treemap**
fig = px.treemap(data, path=["HS2", "HS4", "HS6"], values="Trade Value")
fig.update_layout(
    title="World Exports Value 2021",
    width=1200,
    height=1200,
)
fig.show()

fig = px.treemap(
    data,
    path=["HS2", "HS4", "HS6"],
    values="Trade Value",
    color="Trade Value",
    color_continuous_scale="RdYlGn",
)
fig.update_layout(
    title="Trade Value",
    width=1000,
    height=600,
)
fig.show()

# # **3-D chart**
# controls the number of rows to be read in the dataframe
start, end = 0, 4700
fig = go.Figure(
    data=go.Scatter3d(
        x=data["HS2"][start:end],
        y=data["HS4"][start:end],
        z=data["HS6"][start:end],
        text=data["Section"][start:end],
        mode="markers",
        marker=dict(
            sizemode="diameter",
            sizeref=5000000000,
            size=data["Trade Value"][start:end],
            color=data["Trade Value"][start:end],
            colorscale="Viridis",
            colorbar_title="Trade Value<br>",
            line_color="rgb(140, 140, 170)",
        ),
    )
)
fig.update_layout(
    height=1200,
    width=1200,
    title="3-D Graph - X-HS2, Y-HS4, Z-HS6, Size-Trade Value, Color-Trade Value",
)
fig.show()
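# A safer variant of the summary statistics above: np.std / np.mean on a whole
# DataFrame can fail or warn when non-numeric columns are present, so this
# sketch restricts the computation to numeric columns first.
num = data.select_dtypes(include="number")
print((num.std() / num.mean()).rename("coefficient of variation"))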
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/894/129894036.ipynb
world-exports-value-2021
valchovalev
[{"Id": 129894036, "ScriptId": 38630490, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7041364, "CreationDate": "05/17/2023 09:01:31", "VersionNumber": 1.0, "Title": "EDA : World Exports Value 2021", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 127.0, "LinesInsertedFromPrevious": 52.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 75.0, "LinesInsertedFromFork": 52.0, "LinesDeletedFromFork": 71.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 75.0, "TotalVotes": 0}]
[{"Id": 186304611, "KernelVersionId": 129894036, "SourceDatasetVersionId": 5705064}]
[{"Id": 5705064, "DatasetId": 3279850, "DatasourceVersionId": 5780848, "CreatorUserId": 7041364, "LicenseName": "Unknown", "CreationDate": "05/17/2023 06:35:08", "VersionNumber": 1.0, "Title": "World Exports Value 2021", "Slug": "world-exports-value-2021", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3279850, "CreatorUserId": 7041364, "OwnerUserId": 7041364.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5705064.0, "CurrentDatasourceVersionId": 5780848.0, "ForumId": 3345566, "Type": 2, "CreationDate": "05/17/2023 06:35:08", "LastActivityDate": "05/17/2023", "TotalViews": 48, "TotalDownloads": 3, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 7041364, "UserName": "valchovalev", "DisplayName": "valcho valev", "RegisterDate": "03/27/2021", "PerformanceTier": 1}]
false
1
870
0
902
870
129900207
<jupyter_start><jupyter_text>iris_data
Kaggle dataset identifier: iris-data
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # For graphical representation

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# # **Read the file and display the first five rows**
# Load the data set and see the first 5 rows
a = pd.read_csv("/kaggle/input/iris-data/Iris.csv")
iris = pd.DataFrame(a)
iris.head()

iris.shape

iris.info()

# # Since the Id column serves no purpose, we drop it
iris.drop("Id", axis=1, inplace=True)
iris.head()

# # **Display the number of samples per species**
iris.groupby("Species").size()

# # **Display the distribution of each attribute**
iris.hist()
his = plt.gcf()
his.set_size_inches(12, 6)
plt.show()

# # **Build a model based on the support vector machine (SVM)**
test_size = 0.20
seed = 7
score = "accuracy"

# # **Split the dataset into training and test data**
from sklearn import model_selection

X = iris.iloc[:, :4]
y = iris.iloc[:, 4]
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    X, y, test_size=test_size, random_state=seed
)

from sklearn import svm
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import accuracy_score

# Create an SVM classifier
# kernels: linear, poly, rbf (Radial Basis Function), sigmoid, precomputed
clf = svm.SVC(kernel="rbf")

# # **Cross-validation (k = 5)**
# Define the number of folds for cross-validation
k = 5

# Create a KFold object
kf = KFold(n_splits=k, shuffle=True, random_state=42)

# Perform k-fold cross-validation
scores = cross_val_score(clf, X_train, y_train, cv=kf, scoring=score)

# Print the accuracy scores for each fold
for fold, sc in enumerate(scores):
    print(f"Fold {fold+1}: {sc:.4f}")

# Calculate and print the mean accuracy across all folds
mean_accuracy = scores.mean()
print(f"Mean Accuracy: {mean_accuracy:.4f}")

# # **Evaluating the SVM model**
# Predictions on the test dataset
# (named svm_model to avoid shadowing the svm module)
svm_model = svm.SVC(kernel="rbf")
svm_model.fit(X_train, y_train)
pred = svm_model.predict(X_test)
print(accuracy_score(y_test, pred))

# # **Build a Multilayer Perceptron (MLP) model**
a = pd.read_csv("/kaggle/input/iris-data/Iris.csv", header=None)
i = pd.DataFrame(a)
iris = i.values

# # **Display the first five rows**
i.head(5)

X = iris[1:, 1:5].astype(float)
y = iris[1:, 5]

X[0:5]

y[0:5]

from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense
from sklearn import model_selection
from sklearn.metrics import accuracy_score
from keras.optimizers import Adam

# # **Encode the species numerically**
# Label encode Class (Species)
encoder = LabelEncoder()
encoder.fit(y)
encoded_y = encoder.transform(y)

# # **Convert the numeric class values into a binary class matrix**
# One Hot Encode
y_dummy = np_utils.to_categorical(encoded_y)

# # **Split the dataset into training and test data**
X_train, X_test, y_train_one_hot, y_test_one_hot = model_selection.train_test_split(
    X, y_dummy, test_size=test_size, random_state=seed
)


# # **Model architecture**
# Deep learning model-building function
def deepml_model():
    # Model Creation
    deepml = Sequential()
    deepml.add(Dense(8, input_dim=4, activation="relu"))  # 8
    deepml.add(Dense(10, activation="relu"))  # 10
    deepml.add(Dense(3, activation="softmax"))  # 3
    # Model Compilation
    optimiser = Adam(learning_rate=0.001)
    deepml.compile(
        loss="categorical_crossentropy", optimizer=optimiser, metrics=[score]
    )
    return deepml


# # **Model configuration**
estimate = KerasClassifier(build_fn=deepml_model, epochs=100, batch_size=5, verbose=0)

# # **Cross-validation (k = 5)**
k_fold = KFold(n_splits=5, shuffle=True, random_state=seed)
results = cross_val_score(estimate, X_train, y_train_one_hot, cv=k_fold)

# print per-fold accuracy logs
for fold, rst in enumerate(results):
    print(f"Fold {fold+1}: {rst:.4f}")

# Calculate and print the mean accuracy across all folds
mean_accuracy = results.mean()
print(f"Mean Accuracy: {mean_accuracy:.4f}")

# # **Evaluating the MLP model**
# Predictions on the test dataset
mlp = deepml_model()
mlp.fit(X_train, y_train_one_hot, epochs=100, batch_size=5, verbose=0)
y_pred_one_hot = mlp.predict(X_test)
pred = np.argmax(y_pred_one_hot, axis=1)
y_test = np.argmax(y_test_one_hot, axis=1)
print(accuracy_score(y_test, pred))

# # **Overfitting**
# validation_data=(X_test, y_test_one_hot)
X_train_overfit = X_train[:-50]  # Use a smaller training set
y_train_overfit = y_train_one_hot[:-50]  # Use corresponding labels

history = mlp.fit(
    X_train_overfit,
    y_train_overfit,
    epochs=100,
    batch_size=5,
    validation_data=(X_test, y_test_one_hot),
    verbose=0,
)
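# The overfitting cell above records `history` but never visualizes it; a
# minimal sketch plotting training vs. validation loss with the matplotlib
# module already imported as plt:
plt.plot(history.history["loss"], label="train loss")
plt.plot(history.history["val_loss"], label="validation loss")
plt.xlabel("Epoch")
plt.ylabel("Categorical cross-entropy loss")
plt.legend()
plt.show()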
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/900/129900207.ipynb
iris-data
kamrankausar
[{"Id": 129900207, "ScriptId": 38402106, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11365210, "CreationDate": "05/17/2023 09:50:50", "VersionNumber": 1.0, "Title": "Iris data set machine learning TP", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 194.0, "LinesInsertedFromPrevious": 194.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186312937, "KernelVersionId": 129900207, "SourceDatasetVersionId": 8520}]
[{"Id": 8520, "DatasetId": 5721, "DatasourceVersionId": 8520, "CreatorUserId": 480578, "LicenseName": "CC0: Public Domain", "CreationDate": "11/30/2017 10:26:01", "VersionNumber": 2.0, "Title": "iris_data", "Slug": "iris-data", "Subtitle": "Hello World of Machine Learning and Deep Learning", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 5107.0, "TotalUncompressedBytes": 5107.0}]
[{"Id": 5721, "CreatorUserId": 480578, "OwnerUserId": 480578.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 8520.0, "CurrentDatasourceVersionId": 8520.0, "ForumId": 11974, "Type": 2, "CreationDate": "11/30/2017 10:26:01", "LastActivityDate": "01/31/2018", "TotalViews": 9401, "TotalDownloads": 1764, "TotalVotes": 22, "TotalKernels": 45}]
[{"Id": 480578, "UserName": "kamrankausar", "DisplayName": "kamran", "RegisterDate": "12/04/2015", "PerformanceTier": 0}]
false
1
1,847
0
1,868
1,847
129900468
import numpy as np
from PIL import Image
import math
import matplotlib.pyplot as plt

data = np.load("/kaggle/input/lenet-kernal-info/weights_conv1.npy")


class LeNet5:
    def __init__(self):
        self.conv1_filters = 6
        self.conv1_filter_size = 5
        self.conv2_filters = 16
        self.conv2_filter_size = 5
        self.fc1_units = 120
        self.fc2_units = 84
        self.fc3_units = 10

    # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Need to debug here %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def convolve2d(self, image, filters):
        # Memory-based convolution: image pixels sitting under identical kernel
        # weights are summed first, then each unique weight is multiplied in once.
        matrix_in = image
        # add padding of 1 to the matrix
        padding = 1
        matrix = np.pad(
            matrix_in, pad_width=padding, mode="constant", constant_values=0
        )
        print("Padded Matrix:\n", matrix)
        matrix_kernel = filters
        kernel_size = matrix_kernel.shape[0]
        image_size = matrix_in.shape[0]
        linear_kernel_size = kernel_size * kernel_size
        padded_image_size = matrix.shape[0]
        stride = 1
        conv = ((image_size + 2 * padding - kernel_size) // stride) + 1
        kernel_linear = matrix_kernel.reshape(linear_kernel_size,)
        # kernel_linear_out = np.empty((9, ), dtype=int)
        kernel_linear_out = [0] * linear_kernel_size
        unique_elements = np.unique(kernel_linear)

        def bits_required_for_unique_memory(pqrs):
            # number of bits needed to address `pqrs` unique weights, i.e. ceil(log2(pqrs))
            bits_required = math.log(pqrs) / math.log(2)
            integer_bit_length = round(bits_required)
            fraction_value = bits_required % 1  # check if a fraction part is present
            if 0 < fraction_value < 0.5:
                # add one more bit if a fraction part is present
                integer_bit_length = integer_bit_length + 1
            integer_bit_length = int(integer_bit_length)
            return integer_bit_length

        bits = bits_required_for_unique_memory(len(unique_elements))
        print(bits)
        code_list = [bin(x)[2:].rjust(bits, "0") for x in range(2**bits)]

        def code_word_mem(
            uni_mem_num, code_list_num, quantize_layer_name, quantize_layer_name_out
        ):
            # map each kernel weight to the binary code word of its unique value
            for i in range(len(quantize_layer_name)):
                for j in range(len(uni_mem_num)):
                    if quantize_layer_name[i] == uni_mem_num[j]:
                        quantize_layer_name_out[i] = str(code_list_num[j])

        code_word_mem(unique_elements, code_list, kernel_linear, kernel_linear_out)
        # print(unique_elements, kernel_linear, kernel_linear_out)
        kernel_linear_out_np = np.array(kernel_linear_out)
        memory_temp = [[0] * 2 for i in range(len(unique_elements))]
        memory_add_matrix = [[0] * conv for i in range(conv)]
        for i in range(0, padded_image_size, stride):
            for j in range(0, padded_image_size, stride):
                if (i + kernel_size) <= padded_image_size and (
                    j + kernel_size
                ) <= padded_image_size:
                    mat_temp = matrix[i : i + kernel_size, j : j + kernel_size]
                    mat_temp_np = np.array(mat_temp)
                    mat_temp_np_lin = mat_temp_np.reshape(linear_kernel_size,)
                    temp_add_matrix = 0
                    for k in range(len(unique_elements)):
                        memory_temp[k][0] = unique_elements[k]
                        temp_add = 0
                        # sum all window pixels whose kernel weight carries this code word
                        for l in range(len(kernel_linear_out_np)):
                            if code_list[k] == kernel_linear_out_np[l]:
                                temp_add = temp_add + mat_temp_np_lin[l]
                        memory_temp[k][1] = temp_add
                        temp_add_matrix = temp_add_matrix + (
                            memory_temp[k][0] * memory_temp[k][1]
                        )
                    memory_add_matrix[i // stride][j // stride] = temp_add_matrix
        memory_add_matrix_np = np.array(memory_add_matrix)
        return memory_add_matrix_np

    # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def relu(self, x):
        return np.maximum(x, 0)

    def max_pooling(self, image, size=2):
        height, width, channels = image.shape
        output_size = height // size
        pooled = np.zeros((output_size, output_size, channels))
        for i in range(output_size):
            for j in range(output_size):
                for c in range(channels):
                    pooled[i, j, c] = np.max(
                        image[i * size : i * size + size, j * size : j * size + size, c]
                    )
        return pooled

    def flatten(self, image):
        return image.flatten()

    def fc_layer(self, x, weights, bias):
        return np.dot(x, weights) + bias

    def softmax(self, x):
        exps = np.exp(x - np.max(x))
        return exps / np.sum(exps)

    # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def forward_propagation(self, image):
        conv1_output = self.convolve2d(image, self.conv1_weights) + self.conv1_bias
        conv1_output = self.relu(conv1_output)
        pool1_output = self.max_pooling(conv1_output)
        conv2_output = (
            self.convolve2d(pool1_output, self.conv2_weights) + self.conv2_bias
        )
        conv2_output = self.relu(conv2_output)
        pool2_output = self.max_pooling(conv2_output)
        fc1_output = np.dot(self.fc1_weights, pool2_output.flatten()) + self.fc1_bias
        fc1_output = self.relu(fc1_output)
        fc2_output = np.dot(self.fc2_weights, fc1_output) + self.fc2_bias
        fc2_output = self.relu(fc2_output)
        fc3_output = np.dot(self.fc3_weights, fc2_output) + self.fc3_bias
        output = self.softmax(fc3_output)
        return output


model = LeNet5()
conv1_weights = np.load("/kaggle/input/lenet-kernal-info/weights_conv1.npy")
conv1_bias = np.load("/kaggle/input/lenet-bias-1/lenet/weight_matrices/bias_conv1.npy")
conv2_weights = np.load("/kaggle/input/lenet-kernal-info/weights_conv2.npy")
conv2_bias = np.load("/kaggle/input/lenet-bias-1/lenet/weight_matrices/bias_conv2.npy")
fc1_weights = np.load("/kaggle/input/lenet-kernal-info/weights_fc1.npy")
fc1_bias = np.load("/kaggle/input/lenet-bias-1/lenet/weight_matrices/bias_fc1.npy")
fc2_weights = np.load("/kaggle/input/lenet-kernal-info/weights_fc2.npy")
# following the bias_* naming pattern of the other layers
fc2_bias = np.load("/kaggle/input/lenet-bias-1/lenet/weight_matrices/bias_fc2.npy")
fc3_weights = np.load("/kaggle/input/lenet-kernal-info/weights_fc3.npy")
fc3_bias = np.load("/kaggle/input/lenet-bias-1/lenet/weight_matrices/bias_fc3.npy")

# Assign the loaded weights to the model
model.conv1_weights = conv1_weights
model.conv1_bias = conv1_bias
model.conv2_weights = conv2_weights
model.conv2_bias = conv2_bias
model.fc1_weights = fc1_weights
model.fc1_bias = fc1_bias
model.fc2_weights = fc2_weights
model.fc2_bias = fc2_bias
model.fc3_weights = fc3_weights
model.fc3_bias = fc3_bias

image_path = "path_to_your_image.jpg"  # Replace with the actual image path
image = Image.open(image_path)
image = image.resize((32, 32))  # Resize the image to 32x32
image = np.array(image)  # Convert the image to a numpy array
image = image.transpose(
    (2, 0, 1)
)  # Transpose the dimensions to match LeNet-5 input shape
image = image.astype(np.float32) / 255.0  # Normalize the pixel values between 0 and 1

# Perform forward propagation
output = model.forward_propagation(image)
# Get the predicted class
predicted_class = np.argmax(output)
print("Predicted Class:", predicted_class)
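# # Cross-checking convolve2d with a plain sliding-window convolution
# A minimal sketch, not part of the original notebook: `naive_convolve2d` is a
# hypothetical reference helper. The memory-based accumulation in LeNet5.convolve2d
# (group pixels by identical kernel weight, sum them, multiply each weight in once)
# should agree with a direct window-by-window dot product on a small random input.
def naive_convolve2d(image, kernel, padding=1, stride=1):
    padded = np.pad(image, pad_width=padding, mode="constant", constant_values=0)
    k = kernel.shape[0]
    out_size = (image.shape[0] + 2 * padding - k) // stride + 1
    out = np.zeros((out_size, out_size))
    for i in range(out_size):
        for j in range(out_size):
            window = padded[i * stride : i * stride + k, j * stride : j * stride + k]
            out[i, j] = np.sum(window * kernel)  # cross-correlation, no kernel flip
    return out


small_image = np.random.randint(0, 5, size=(8, 8)).astype(float)
small_kernel = np.random.randint(-2, 3, size=(3, 3)).astype(float)
reference = naive_convolve2d(small_image, small_kernel)
candidate = model.convolve2d(small_image, small_kernel)
print("Max absolute difference:", np.max(np.abs(reference - candidate)))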
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/900/129900468.ipynb
null
null
[{"Id": 129900468, "ScriptId": 38353739, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13480720, "CreationDate": "05/17/2023 09:52:57", "VersionNumber": 1.0, "Title": "test_lenet", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 177.0, "LinesInsertedFromPrevious": 177.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,300
0
2,300
2,300
129420736
<jupyter_start><jupyter_text>Football/Soccer | Bundesliga Player Database

The Bundesliga Players dataset provides a comprehensive collection of information on every player in the German Bundesliga football league. From renowned goalkeepers to talented defenders, this dataset offers an extensive range of player details including their names, full names, ages, heights, nationalities, places of birth, prices, maximum prices, positions, shirt numbers, preferred foot, current clubs, contract expiration dates, dates of joining the clubs, player agents, and outfitters. Whether you're a passionate football fan, a sports analyst, or a fantasy football enthusiast, this dataset serves as a valuable resource for exploring and analyzing the profiles of Bundesliga players, enabling you to delve into their backgrounds, performance statistics, and club affiliations. Discover the stars of German football and gain insights into their careers with this comprehensive Bundesliga Players dataset.

Kaggle dataset identifier: bundesliga-soccer-player
<jupyter_script># # Market Value Prediction with Random Forest Regressor
# ### A short look at the data + imports
# - `numpy`: Fundamental package for scientific computing.
# - `pandas`: Library for data manipulation and analysis.
# - `sklearn.compose.ColumnTransformer`: Applies different preprocessing steps to dataset columns.
# - `sklearn.preprocessing.OneHotEncoder`: Encodes categorical variables into binary matrix representation.
# - `sklearn.metrics.mean_squared_error`: Metric for evaluating regression models.
# - `sklearn.model_selection.train_test_split`: Splits dataset into training and testing subsets.
# - `sklearn.ensemble.RandomForestRegressor`: Ensemble regression model using decision trees.
# - `sklearn.model_selection.GridSearchCV`: Technique for hyperparameter tuning.
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV

df_players = pd.read_csv(
    "/kaggle/input/bundesliga-soccer-player/bundesliga_player.csv", index_col=[0]
)
df_players.head()
df_players.describe()
df_players.info()

# ### Short NaN value handling
# The two columns with too many NaN values get the string 'none' as a replacement, turning
# the missing entries into a categorical value. Afterwards it is easy to drop the remaining rows with NaN values.
df_players = df_players[
    [
        "age",
        "height",
        "nationality",
        "foot",
        "position",
        "club",
        "contract_expires",
        "joined_club",
        "player_agent",
        "outfitter",
        "price",
    ]
]
df_players["outfitter"] = df_players["outfitter"].replace(np.nan, "none")
df_players["player_agent"] = df_players["player_agent"].replace(np.nan, "none")
df_players.dropna(inplace=True)
print(f"df_players shape: {df_players.shape}")
# ### Variable Usefulness for Predicting Price
# 1. Age: Age is likely to be a useful variable for predicting price, as younger players generally have higher market values due to their potential for growth and the longer career ahead of them.
# 2. Height: Height might have some influence on the price, as certain positions or playing styles may favor taller players. However, its impact on price may not be as significant compared to other variables.
# 3. Club: Club affiliation is an important variable for predicting price. Players from high-profile clubs or clubs known for producing top talent are often valued more highly in the market.
# 4. Position: Position is a crucial factor in determining price. Different positions have varying levels of demand and scarcity, leading to variations in market values.
# 5. Contract Expiry Date: The remaining duration of a player's contract can impact their price. Players with longer contract terms may have higher values due to increased stability and reduced transfer urgency.
# 6. Contract Start Date: The start date of a player's current contract may have less influence on predicting price compared to other variables. It is more indicative of the player's history with the club than of their current market value.
# 7. Agency/Representative: The player's agency or representative is not directly related to their market value. It is more of a logistical detail and does not provide significant insight into predicting price.
# 8. Sponsorship Brand: The sponsorship brand associated with a player does not have a direct impact on their market value. While brand endorsements can increase a player's overall earnings, they may not be a significant factor in price prediction.
# 9. Right/Left-Footed: A player's dominant foot is unlikely to have a substantial impact on their market value. It is more relevant to their playing style or preferred positions than to predicting price.
# 10. Max Price (Excluded): The "max price" variable should be excluded from the prediction model because it essentially mirrors the target variable we want to predict. Including it as a feature would result in data leakage and lead to an overly optimistic evaluation of the model's performance.
# Note: The above analysis is based on general assumptions and domain knowledge. It is recommended to validate the significance of these variables through statistical analysis and feature selection techniques specific to the dataset and prediction task at hand.
df_target = df_players[["price"]]
df_features = df_players[
    [
        "age",
        "height",
        "foot",
        "position",
        "club",
        "contract_expires",
        "joined_club",
        "player_agent",
        "outfitter",
    ]
]

# ### One Hot Encoding
# The data contains many categorical features, and the Random Forest Regressor can't handle those directly. Because of this, One Hot Encoding is used: each categorical feature is split into binary columns that tell the model whether a given category applies or not. Nevertheless, let's first take a look at those variables.
for column in df_features.columns:
    unique_values = df_features[column].unique()
    print(f"Unique values in column '{column}': {unique_values}")

# Now we use the ColumnTransformer to apply the One Hot Encoding
columns_to_encode = [
    "foot",
    "position",
    "club",
    "contract_expires",
    "joined_club",
    "player_agent",
    "outfitter",
]
ct = ColumnTransformer(
    transformers=[("encoder", OneHotEncoder(), columns_to_encode)],
    remainder="passthrough",
)
df_features_encoded = ct.fit_transform(df_features)
df_features_encoded.shape

# ### Train and Test Split
x_train, x_test, y_train, y_test = train_test_split(
    df_features_encoded, df_target, test_size=0.3, random_state=0
)
y_train = y_train.values.ravel()
y_test = y_test.values.ravel()
print(f"x_train: {x_train.shape}")
print(f"x_test: {x_test.shape}")
print(f"y_train: {y_train.shape}")
print(f"y_test: {y_test.shape}")

# ### Hyperparameter tuning
# Using GridSearch for hyperparameter tuning is a good option because it allows us to systematically search through different combinations of hyperparameters and find the optimal configuration for our model. It automates the process of tuning hyperparameters, saving time (not computing time :p) and effort. GridSearch performs an exhaustive search over the specified hyperparameter grid, evaluating each combination using cross-validation. This helps us find the hyperparameters that yield the best performance based on the chosen evaluation metric. By using GridSearch, we can effectively optimize our model without the need for manual trial and error.
param_grid = {
    "n_estimators": np.arange(10, 1000, 50),
    "max_depth": np.arange(5, 20, 2),
    "min_samples_split": np.arange(2, 11, 2),
    "min_samples_leaf": np.arange(1, 10, 2),
}
rfr = RandomForestRegressor()
grid_search = GridSearchCV(
    estimator=rfr,
    param_grid=param_grid,
    scoring="neg_root_mean_squared_error",
    cv=3,
    verbose=2,
)
grid_search.fit(x_train, y_train)
best_params = grid_search.best_params_
print(grid_search.best_estimator_)

# ### Modeling
# The tuned hyperparameters are used to set up the model
best_rfr = RandomForestRegressor(**best_params)
best_rfr.fit(x_train, y_train)

# ### Evaluation
# Here the values are predicted and evaluated with MAE, MSE and RMSE. The important comparison is between RMSE and MAE, because it shows how strongly outliers or large errors impact the result.
pred = best_rfr.predict(x_test)
# evaluation
mae = mean_absolute_error(y_test, pred)
mse = mean_squared_error(y_test, pred)
rmse = np.sqrt(mse)
print("mean absolute error: ", mae)
print("mean squared error: ", mse)
print("root mean squared error: ", rmse)
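# ### A cheaper alternative: randomized search
# The grid above spans 20 * 8 * 5 * 5 = 4000 parameter combinations, i.e. about 12000
# fits with 3-fold CV. A minimal sketch of a randomized alternative over the same
# space; `n_iter` and `random_state` are illustrative choices, not part of the
# original notebook.
from sklearn.model_selection import RandomizedSearchCV

random_search = RandomizedSearchCV(
    estimator=RandomForestRegressor(),
    param_distributions=param_grid,  # reuse the grid defined above
    n_iter=50,  # sample 50 of the 4000 combinations
    scoring="neg_root_mean_squared_error",
    cv=3,
    random_state=0,
    verbose=2,
)
random_search.fit(x_train, y_train)
print(random_search.best_params_)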
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/420/129420736.ipynb
bundesliga-soccer-player
oles04
[{"Id": 129420736, "ScriptId": 38447882, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12065933, "CreationDate": "05/13/2023 16:30:59", "VersionNumber": 4.0, "Title": "Market Value Prediction with Randome Forest", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 145.0, "LinesInsertedFromPrevious": 60.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 85.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185455470, "KernelVersionId": 129420736, "SourceDatasetVersionId": 5668174}]
[{"Id": 5668174, "DatasetId": 3258253, "DatasourceVersionId": 5743664, "CreatorUserId": 12065933, "LicenseName": "Other (specified in description)", "CreationDate": "05/12/2023 07:42:13", "VersionNumber": 1.0, "Title": "Football/Soccer | Bundesliga Player Database", "Slug": "bundesliga-soccer-player", "Subtitle": "Bundesliga Player Database: Complete Profiles, Stats, and Clubs of each Player", "Description": "The Bundesliga Players dataset provides a comprehensive collection of information on every player in the German Bundesliga football league. From renowned goalkeepers to talented defenders, this dataset offers an extensive range of player details including their names, full names, ages, heights, nationalities, places of birth, prices, maximum prices, positions, shirt numbers, preferred foot, current clubs, contract expiration dates, dates of joining the clubs, player agents, and outfitters. Whether you're a passionate football fan, a sports analyst, or a fantasy football enthusiast, this dataset serves as a valuable resource for exploring and analyzing the profiles of Bundesliga players, enabling you to delve into their backgrounds, performance statistics, and club affiliations. Discover the stars of German football and gain insights into their careers with this comprehensive Bundesliga Players dataset.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3258253, "CreatorUserId": 12065933, "OwnerUserId": 12065933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5668174.0, "CurrentDatasourceVersionId": 5743664.0, "ForumId": 3323776, "Type": 2, "CreationDate": "05/12/2023 07:42:13", "LastActivityDate": "05/12/2023", "TotalViews": 7284, "TotalDownloads": 1339, "TotalVotes": 37, "TotalKernels": 11}]
[{"Id": 12065933, "UserName": "oles04", "DisplayName": "Ole", "RegisterDate": "10/23/2022", "PerformanceTier": 2}]
false
1
2,006
0
2,248
2,006
129457762
# # Amazon products Recommendation System
# ![im1](https://cdn.activestate.com/wp-content/uploads/2019/12/RecommendationEngine.png)
# Table Of Contents
#
# |No | Contents
# |:---| :---
# |1 | [ Introduction ](#1)
# |2 | [ Types of Recommendation Systems](#2)
# |3 | [ Process of building a Recommendation System](#3)
# |4 | [ Data Collection](#4)
# |5 | [ Data Preprocessing](#5)
# |6 | [ Dataset Summary](#6)
# |7 | [ Text Pre-processing](#7)
# |8 | [Sentiment Analysis](#8)
#
# # Introduction
# This project aims to use customer feedback on Amazon to provide personalized recommendations. By analyzing reviews, the system learns about customers' preferences and helps them discover products that suit their tastes. The goal is to revolutionize the way customers explore and engage with the wide range of products on Amazon by leveraging the power of machine learning and Natural Language Processing.
# Companies like Amazon use different recommendation systems to provide suggestions to their customers. For example, there is **item-item collaborative filtering**, which produces high-quality recommendations in real time. Such a system is a kind of information filtering system which seeks to predict the "rating" or preference a user would give to an item.
# ![im](https://successive.tech/wp-content/uploads/2022/05/Top-Picks-For-You-on-the-Amazon-website.png)
# example of Amazon Recommender System
#
# # Types of Recommendation Systems
# ![im3](https://www.xenonstack.com/hubfs/recommendation-systems-xenonstack.png)
# Generations of Recommender Systems [1](https://www.xenonstack.com/blog/recommender-systems)
# **Recommendation systems** were developed to address the challenge of information overload in various domains, such as e-commerce, entertainment, and content platforms, where users needed assistance in navigating through vast catalogs of products, movies, music, articles, and more.
# Their purpose is to help users discover relevant and personalized items or content based on their preferences, interests, and past behavior. By analyzing user data, such as browsing history, purchase history, ratings, and interactions, recommendation systems can generate tailored suggestions that align with individual user preferences.
# **Advantages:**
# These systems not only enhance the user experience by saving time and effort in searching for desirable items but also drive business growth by increasing customer engagement, satisfaction, and sales. They also have the potential to introduce users to new and relevant items they may not have discovered on their own, thereby expanding their choices and improving overall user satisfaction.
# There are several types of recommendation systems commonly used in machine learning and natural language processing (NLP). Here are some of the key types: [2](https://medium.com/mlearning-ai/what-are-the-types-of-recommendation-systems-3487cbafa7c9)
# 1. **Content-Based Filtering:** This approach recommends items based on the user's past preferences or behavior. It analyzes the characteristics or features of items and compares them to the user's profile or history to make recommendations. For example, in a movie recommendation system, it may suggest similar movies based on genre, actors, or plot.
# 2. **Collaborative Filtering:** Collaborative filtering recommends items based on the behavior and preferences of similar users. It looks for patterns and similarities among users' interactions, such as ratings or purchases, and suggests items that other like-minded users have enjoyed. This method does not rely on item characteristics but rather on user behavior.
# 3. **Hybrid Approaches:** Hybrid recommendation systems combine multiple techniques to improve recommendation accuracy. They may integrate content-based and collaborative filtering methods or incorporate other machine learning algorithms to provide more precise and diverse recommendations.
# 4. **Matrix Factorization:** Matrix factorization techniques, such as singular value decomposition (SVD) or alternating least squares (ALS), decompose user-item interaction matrices to identify latent factors or features. By capturing the underlying patterns, these methods can predict missing ratings and recommend items accordingly.
# 5. **Deep Learning-based Methods:** Deep learning models, such as neural networks, can be applied to recommendation systems. They can learn intricate patterns and representations from large-scale data, enabling more accurate recommendations. Techniques like recurrent neural networks (RNNs) and convolutional neural networks (CNNs) have been employed in recommendation tasks.
# 6. **Natural Language Processing (NLP)-based Methods:** In NLP, recommendation systems can leverage techniques like sentiment analysis, text classification, or topic modeling to extract information from textual data. By understanding user reviews, feedback, or product descriptions, NLP-based methods can provide recommendations based on textual similarity or sentiment analysis.
#
# # Process of building a Recommendation System
# ![process](https://i0.wp.com/neptune.ai/wp-content/uploads/2022/10/Structure-of-a-recommender-system.png?resize=840%2C471&ssl=1)
# Building a recommender system using Amazon product reviews involves several steps. Here's an overview of the process:
# 1. **Data Collection:** Obtaining the Amazon product reviews dataset. I've tried to retrieve data by scraping Amazon's website as well as looking for publicly available datasets that contain product reviews.
# 2. **Data Preprocessing:** Cleaning and preprocessing the reviews data to remove noise and irrelevant information. For example: removing HTML tags, punctuation, and stopwords, and converting text to lowercase. Also, performing stemming or lemmatization to normalize words.
# 3. **Text Representation:** Converting the preprocessed reviews into a numerical representation that can be used by machine learning algorithms. For example: bag-of-words or term frequency-inverse document frequency (TF-IDF) can be used to represent the text data as vectors.
# 4. **Sentiment Analysis:** Analyzing the sentiment of the reviews to determine whether they are positive, negative, or neutral. Sentiment analysis can be performed using various techniques, such as using pre-trained models, lexicon-based methods, or training a sentiment classifier from scratch.
# 5. **Recommendation Algorithm:** We can use collaborative filtering, content-based filtering, or hybrid approaches.
# 6. **Training the Model:** Training the recommendation model using the preprocessed data. The training process depends on the chosen algorithm. For example, if we use collaborative filtering, we can apply techniques like matrix factorization or deep learning models such as neural networks. To implement content-based filtering, we can use machine learning models such as decision trees, support vector machines (SVM), or deep learning models.
# 7. **Evaluation:** Assessing the performance of our recommender system by splitting the dataset into training, validation, and testing sets and using evaluation metrics: precision, recall, F1-score, and accuracy.
# **Lastly, we can deploy our Recommender System in a web or mobile app and monitor the model's efficiency by collecting feedback, updating the data, and refining the algorithms used.**
#
# # Data Collection
# I have tried scraping Amazon's website for product reviews.
# At first I tried to collect a few examples of reviews of the same product and this was the output:
# ![image.png](attachment:5f261d4e-48fc-469c-bdb1-241738ee7f3c.png)
# **NOTE:**
# **AMAZON ASINS**
# Amazon uses ASIN (Amazon Standard Identification Number) codes to identify products. Every product listed on Amazon has its own unique ASIN code, which you can use to construct URLs to scrape that product page, reviews, or other sellers.
# ---
# Then I wanted to do automatic scraping of every ASIN in the product review pages, but it always returned a 429 error, which indicates that the request was rate-limited by the server.
# This is due to Amazon's rate limits to prevent excessive scraping.
# ![image.png](attachment:bd1a6a12-f6d8-4b32-88cc-d13c6ac1b1d5.png)
# Instead, we have publicly available datasets that contain product reviews in different categories:
# **Amazon product reviews data**
# This dataset contains product reviews and metadata from Amazon, including 142.8 million reviews spanning May 1996 - July 2014.
# It includes reviews (ratings, text, helpfulness votes), product metadata (descriptions, category information, price, brand, and image features), and links (also viewed/also bought graphs).
# Format is one-review-per-line in (loose) json. See examples below for further help reading the data.
# ![image.png](attachment:d33eb843-7347-4c31-94c6-419ee1e799ff.png)
# **Attributes Information:**
# * reviewerID - ID of the reviewer, e.g. A2SUAM1J3GNN3B
# * asin - ID of the product, e.g. 0000013714
# * reviewerName - name of the reviewer
# * helpful - helpfulness rating of the review, e.g. 2/3
# * reviewText - text of the review
# * overall - rating of the product
# * summary - summary of the review
# * unixReviewTime - time of the review (unix time)
# * reviewTime - time of the review (raw)
# **Note:** These datasets were sourced from: [Amazon Product Reviews](https://cseweb.ucsd.edu/~jmcauley/datasets.html#amazon_reviews) [3]
#
# # Data Pre-processing
# The first step is to explore and clean the data. As there are 3 datasets, we will also merge them into a single dataset of Amazon reviews covering 3 different categories.
# ### Importing Libraries
import numpy as np  # for linear algebra
import pandas as pd  # data processing
import os
from IPython.core.interactiveshell import InteractiveShell

InteractiveShell.ast_node_interactivity = "all"
import math
import json
import time
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
import joblib
import scipy.sparse
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import svds
import warnings

warnings.simplefilter("ignore")
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
import re
from sklearn.model_selection import train_test_split
from sklearn.utils import resample

# #### Load the Datasets
# ##### Software dataset
df = pd.read_json(r"/kaggle/input/amazon-software/Software.json", lines=True)
df.to_csv(r"Software.csv", index=None)
# ##### Fashion dataset
df_1 = pd.read_json(r"/kaggle/input/amazon-fashion/AMAZON_FASHION.json", lines=True)
df_1.to_csv(r"amazon_fashion.csv", index=None)
# ##### Appliances dataset
df_2 = pd.read_json(r"/kaggle/input/appliances/Appliances.json", lines=True)
df_2.to_csv(r"Appliances.csv", index=None)

# **Let's take a look**
software_data = pd.read_csv("Software.csv")
print(f"Shape of The software dataset : {software_data.shape}")
print(f"\nGlimpse of The Dataset :")
software_data.head()
software_data["reviewTime"] = pd.to_datetime(
    software_data["reviewTime"], format="%m %d, %Y"
)
fashion_data = pd.read_csv("amazon_fashion.csv")
print(f"Shape of The fashion dataset : {fashion_data.shape}")
print(f"\nGlimpse of The Dataset :")
fashion_data.head()
fashion_data["reviewTime"] = pd.to_datetime(
    fashion_data["reviewTime"], format="%m %d, %Y"
)
fashion_data.info()
appliances_data = pd.read_csv("Appliances.csv")
print(f"Shape of The appliances dataset : {appliances_data.shape}")
print(f"\nGlimpse of The Dataset :")
appliances_data.head()
appliances_data["reviewTime"] = pd.to_datetime(
    appliances_data["reviewTime"], format="%m %d, %Y"
)

# **Merge the datasets together and add a category column**
# Add category column
software_data["category"] = "software"
appliances_data["category"] = "appliances"
fashion_data["category"] = "fashion"
# Concatenate the datasets
merged_df = pd.concat([software_data, appliances_data, fashion_data], ignore_index=True)
# Save the merged dataset
merged_df.to_csv("merged_dataset.csv", index=False)
print(f"Shape of The merged dataset : {merged_df.shape}")
print(f"\nGlimpse of The Dataset :")
merged_df.head().style.set_properties(
    **{"background-color": "#2a9d8f", "color": "white", "border": "1.5px solid black"}
)
categories = len(merged_df["category"].unique())
categories
print(f"Informations about the dataset :\n")
print(merged_df.info())
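# A quick sanity check on the concatenation (a sketch, not in the original notebook):
# per-category row counts should add up to the merged shape printed above.
print(merged_df["category"].value_counts())
print("Total rows:", merged_df["category"].value_counts().sum())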
# # Dataset Summary
print(f"Summary of The Dataset :")
merged_df.describe().T.style.set_properties(
    **{"background-color": "#2a9d8f", "color": "white", "border": "1.5px solid black"}
)
merged_df.describe(include=object).T.style.set_properties(
    **{"background-color": "#2a9d8f", "color": "white", "border": "1.5px solid black"}
)
merged_df.describe()["overall"].T
# minimum rating: 1
# maximum rating: 5

# **Checking null values**
print("Null Values of the Dataset :")
merged_df.isna().sum().to_frame().T.style.set_properties(
    **{"background-color": "#2a9d8f", "color": "white", "border": "1.5px solid black"}
)
# We can see that we have missing values in these columns:
# - style
# - reviewerName
# - reviewText
# - summary
# - vote
# - image
# There are several approaches we can consider:
# 1. **Remove Rows or Columns:** If the missing values are present in a small number of rows or columns and do not significantly impact our analysis, we can consider removing those rows or columns using the dropna() function.
# 2. **Impute Missing Values:** If the missing values are present in a significant number of rows or columns, we may choose to impute or fill in those missing values with estimated or calculated values. Some common imputation techniques include replacing missing values with the mean, median, mode, or a constant value. Pandas provides the fillna() function for imputing missing values.
# 3. **Predict missing values with an ML Algorithm:** In some cases, we may want more sophisticated imputation techniques that predict missing values based on other features in the dataset.
# Among the columns with missing values, we can notice that most don't convey a lot of important information, except 'reviewText' and maybe 'summary'.

# Analyze missing data
missing_data = merged_df.isnull().sum()
missing_percentage = (missing_data / len(merged_df)) * 100
# Create a summary DataFrame
missing_summary = pd.DataFrame(
    {
        "Column": missing_data.index,
        "Missing Count": missing_data.values,
        "Missing Percentage": missing_percentage.values,
    }
)
# Sort the summary DataFrame by missing percentage
missing_summary = missing_summary.sort_values("Missing Percentage", ascending=False)
# Print the summary
missing_summary
# Set the threshold for missing percentage
threshold = 50  # remove columns with more than 50% missing values

# **Removing columns with a high percentage of missing values:**
# Identify columns to remove
columns_to_remove = missing_percentage[missing_percentage > threshold].index
# Remove columns with high missing percentages
merged_df = merged_df.drop(columns=columns_to_remove)
# Print the updated DataFrame
merged_df.head()

# Impute missing values in text columns with an indicator value
text_columns = ["reviewText", "summary", "reviewerName"]  # Example text columns
for column in text_columns:
    merged_df[column] = merged_df[column].fillna("Unknown")
# Print the updated DataFrame
merged_df.head()
print("Null Values of the Dataset :")
merged_df.isna().sum().to_frame().T.style.set_properties(
    **{"background-color": "#2a9d8f", "color": "white", "border": "1.5px solid black"}
)
merged_df.duplicated().value_counts()  # check for duplicated data values
merged_df = merged_df.drop_duplicates()
merged_df.duplicated().value_counts()
merged_df.head()

# **Constant Features**
# Constant features are those having the same value for all observations of the dataset. It is advisable to remove them, since they add no information that allows the ML model to classify or predict a target.
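# Before the fast_ml helper below, a minimal pandas-only check (a sketch, not in
# the original notebook): a column is quasi-constant when a single value covers
# almost all of its rows.
quasi_constant = [
    col
    for col in merged_df.columns
    if merged_df[col].value_counts(normalize=True, dropna=False).iloc[0] > 0.90
]
print("Quasi-constant columns (>90% one value):", quasi_constant)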
from fast_ml.utilities import display_all
from fast_ml.feature_selection import get_constant_features

constant_features = get_constant_features(merged_df, threshold=0.90, dropna=False)
display_all(constant_features)
# to get the list of constant features
constant_feats = (constant_features["Var"]).to_list()
print(constant_feats)
# **There are no constant features in the dataset**
# ---
# ### Visualization:
# **Overall Ratings:**
import seaborn as sns

# Check the distribution of the rating
with sns.axes_style("white"):
    g = sns.catplot(x="overall", data=merged_df, aspect=2.0, kind="count")
    g.set_ylabels("Total number of ratings")
# Most of the users have given a rating of 5

# **Number of reviews by the verified status of the reviewers**
verified_counts = merged_df["verified"].value_counts()
plt.bar(["Verified", "Not Verified"], verified_counts.values)
plt.xlabel("Verified Status")
plt.ylabel("Number of Reviews")
plt.title("Review Count by Verified Status")
plt.show()
# Most users who write reviews are verified.
# Verified reviews are generally considered more trustworthy, as they indicate that the reviewer has actually purchased and used the product.

# **Number of reviews over time**
merged_df["reviewTime"] = pd.to_datetime(merged_df["reviewTime"])
review_count_over_time = merged_df["reviewTime"].value_counts().sort_index()
plt.plot(review_count_over_time.index, review_count_over_time.values)
plt.xlabel("Review Time")
plt.ylabel("Number of Reviews")
plt.title("Review Count over Time")
plt.xticks(rotation=45)
plt.show()
# We can notice a spike in the year 2016, which indicates a significant increase in the number of reviews during that particular year.

merged_df["month"] = merged_df["reviewTime"].dt.month
review_count_by_month = merged_df["month"].value_counts().sort_index()
plt.plot(review_count_by_month.index, review_count_by_month.values)
plt.xlabel("Months")
plt.ylabel("Number of Reviews")
plt.title("Review Count by month")
plt.show()
# We can notice a significant increase in the number of reviews during January and February, after which the count declines toward December.
# Peak periods seem to be January, February, and December.

# **Rating scores per Category**
# (the bar height is the mean of the `overall` rating per category)
sns.barplot(x="category", y="overall", data=merged_df)
plt.xlabel("Category")
plt.ylabel("Average Overall Rating")
plt.title("Category-wise Average Rating")
plt.xticks(rotation=45)
plt.show()

# **We can generate sentiment scores and then use those scores to create the sentiment distribution visualization:**
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer

nltk.download("vader_lexicon")
# VADER sentiment analyzer
sia = SentimentIntensityAnalyzer()


def analyze_sentiment(text):
    """
    Analyze the sentiment of a given text using the VADER sentiment analyzer
    """
    sentiment = sia.polarity_scores(text)
    return sentiment["compound"]


def categorize_sentiment(score):
    """
    Categorize a compound sentiment score into a sentiment label
    """
    if score > 0.05:
        return "positive"
    elif score < -0.05:
        return "negative"
    else:
        return "neutral"


# **The VADER (Valence Aware Dictionary and sEntiment Reasoner) lexicon:** [4](https://github.com/cjhutto/vaderSentiment)
# It is a pre-trained sentiment analysis tool that combines a large collection of words and phrases manually annotated with sentiment scores.
# Each word or phrase in the lexicon is assigned a sentiment intensity score that represents the degree of positivity or negativity associated with it. The sentiment scores range from -1 (extremely negative) to +1 (extremely positive).
# The goal is to analyze the sentiment of a given text.
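# A quick smoke test of the two helpers above (the sentences are made-up examples,
# not from the dataset):
for sample in [
    "This product is absolutely fantastic, works perfectly!",
    "Terrible quality, broke after two days.",
    "It is a phone case.",
]:
    s = analyze_sentiment(sample)
    print(f"{sample!r} -> score={s:.3f}, label={categorize_sentiment(s)}")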
# **The VADER (Valence Aware Dictionary and sEntiment Reasoner) lexicon:** [4](https://github.com/cjhutto/vaderSentiment)
# It is a pre-trained sentiment analysis tool that combines a large collection of words and phrases manually annotated with sentiment scores.
# Each word or phrase in the lexicon is assigned a sentiment intensity score that represents the degree of positivity or negativity associated with it. The sentiment scores range from -1 (extremely negative) to +1 (extremely positive).
# The goal is to analyze the sentiment of a given text.
# perform sentiment analysis on review text to generate sentiment scores
# merged_df['sentiment_score'] = merged_df['reviewText'].apply(analyze_sentiment)
# categorize sentiment scores into labels (e.g., positive, negative, neutral)
# merged_df['sentiment_label'] = merged_df['sentiment_score'].apply(categorize_sentiment)
# count the number of reviews for each sentiment label
# sentiment_counts = merged_df['sentiment_label'].value_counts()
# plt.pie(sentiment_counts, labels=sentiment_counts.index, autopct='%1.1f%%')
# plt.title('Sentiment Distribution')
# plt.show()
# This part of the code takes a long time to execute
merged_df.columns
columns = ["unixReviewTime", "month"]
merged_df = merged_df.drop(columns=columns, axis=1)
# **Unique Users**
print("\nTotal no of ratings :", merged_df.shape[0])
print("Total No of Users :", len(np.unique(merged_df.reviewerID)))
# **number of ratings per user**
# number of ratings per user
nb_rates_per_user = (
    merged_df.groupby(by="reviewerID")["overall"].count().sort_values(ascending=False)
)
sns.barplot(x=nb_rates_per_user.index[:10], y=nb_rates_per_user[:10])
plt.xlabel("Reviewer ID")
plt.ylabel("Number of Ratings")
plt.title("Top 10 Users by Number of Ratings")
plt.xticks(rotation=45)
plt.show()
# ---
# # Text Pre-processing
# #### We can use text preprocessing techniques to clean and prepare the 'reviewText' and 'summary' columns:
# 1. **Removing punctuation:** we can define a set of punctuation characters using the string.punctuation module, then use a list comprehension to remove these punctuation characters from each text.
# 2. **Converting text to lowercase:** to ensure consistency in the text data.
# 3. **Tokenization:** we can tokenize each text into a list of words, which is the process of breaking a text into smaller units called tokens so that it can be easily analyzed and processed by machine learning algorithms. For example: "I love to play among us." After tokenization this sentence would be: ["I", "love", "to", "play", "among", "us"]
# 4. **Removing stopwords:** we can initialize a set of stopwords using the stopwords.words('english') function from NLTK and remove them from each tokenized text using a list comprehension.
# 5. **Lemmatization:** it is useful in standardizing and normalizing the text data. It is basically a process of reducing words to their base or root form. For example: for the words "running", "runs", and "ran", the lemma would be "run".
# 6. **Handling contractions:** for example: convert "can't" to "cannot" and "won't" to "will not". This can help ensure consistent representation of words.
# 7. **Removing URLs and email addresses:** if our text data contains URLs or email addresses, we can remove them as they don't typically contribute to sentiment analysis.
# 8. **Removing numbers:** digits rarely carry sentiment information on their own.
# 9. **Handling emoticons and emoji:** emoticons and emojis can convey sentiment and add context to the text, but we can remove them or convert them to corresponding textual representations.
# 10. **Handling abbreviations and acronyms:** expand common abbreviations to their full forms to avoid losing information. For example: convert "lol" to "laugh out loud" and "btw" to "by the way".
# 11. **Removing special characters:** this may include currency symbols, trademark symbols, etc.
# 12. **Handling spelling corrections:** depending on the quality of the data, we can perform spelling corrections to improve the accuracy of sentiment analysis. This can be done using libraries like pyspellchecker or language-specific dictionaries.
# 13. **Joining tokens:** finally, we join the preprocessed tokens back into a single string.
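# **To make step 3 concrete, here is the example sentence from the list run through NLTK's tokenizer (a small illustrative cell; word_tokenize needs the 'punkt' resource, which is also downloaded a few cells below):**
import nltk

nltk.download("punkt")
from nltk.tokenize import word_tokenize

print(word_tokenize("I love to play among us."))
# note that word_tokenize also keeps the final '.' as its own token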
# #### Why do we have to split text into tokens?
# **Tokenization** is the process of splitting text into smaller units, typically words or subwords, known as tokens.
# The reasons why we should do this process:
# - **Analysis goal:** if our analysis requires a word-level or subword-level understanding of the text, such as sentiment analysis or language modeling, tokenization is typically necessary.
# - **Text processing techniques:** many natural language processing (NLP) techniques, such as stemming, lemmatization, part-of-speech tagging, and named entity recognition, operate on individual tokens, so tokenization is a necessary step before applying these techniques.
# - **Model input requirements:** machine learning models and pre-trained language models often expect tokenized input, so we should tokenize to convert the raw text into a format that the models can understand.
# - **Contextual understanding:** tokenization can capture the contextual meaning of words, which can be crucial for tasks like sentiment analysis. For example, "not good" and "good" have opposite sentiments; without tokenization they would be treated as one token ("not good") and might lose the intended meaning.
# I ran into an error here: NLTK could not locate the 'wordnet' corpus needed by the lemmatizer.
# Solution: download and extract the corpus manually:
import wget
import zipfile
import os

url = "https://github.com/nltk/nltk_data/raw/gh-pages/packages/corpora/wordnet.zip"
zip_file_path = "/kaggle/working/wordnet.zip"  # Update the path as needed
wget.download(url, zip_file_path)
extract_dir = "/usr/share/nltk_data/corpora/"  # Update the path as needed
with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
    zip_ref.extractall(extract_dir)
os.remove(zip_file_path)
import re
import string
import emoji
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import contractions

# Download NLTK resources
nltk.download("stopwords")
nltk.download("punkt")
nltk.download("wordnet")
stop_words = set(stopwords.words("english"))
lemmatizer = WordNetLemmatizer()


def preprocess_text(text):
    # expand contractions first, while apostrophes are still present (e.g. "can't" -> "cannot")
    text = contractions.fix(text)
    # remove urls
    text = re.sub(r"http\S+|www\S+|https\S+", "", text)
    # remove email addresses
    text = re.sub(r"\S+@\S+", "", text)
    # convert emojis to their textual description (e.g. ":smiling_face:")
    text = emoji.demojize(text)
    # convert text to lowercase
    text = text.lower()
    # remove punctuation (this also strips the colons produced by demojize)
    text = "".join([char for char in text if char not in string.punctuation])
    # remove numbers
    text = re.sub(r"\d+", "", text)
    # tokenization
    tokens = word_tokenize(text)
    # remove stopwords
    tokens = [word for word in tokens if word not in stop_words]
    # lemmatization
    tokens = [lemmatizer.lemmatize(word) for word in tokens]
    # join tokens back into a single string
    preprocessed_text = " ".join(tokens)
    return preprocessed_text


# ---
merged_df.head(1)
# testing the preprocessing function
example = merged_df["reviewText"][0]
preprocessed_text = preprocess_text(example)
print(
    "Text before processing:\n {}\n\nText after processing:\n {}".format(
        example, preprocessed_text
    )
)
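# **A made-up stress test to check that the cleaning steps behave as described (the sentence is invented for illustration):**
sample = "I can't believe it 😊!!! Visit https://example.com or mail me at user@example.com"
print(preprocess_text(sample))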
# **swifter.apply:** with swifter, the function is applied in parallel, which can significantly speed up the execution time
import swifter

merged_df["reviewText"] = merged_df["reviewText"].swifter.apply(preprocess_text)
merged_df["summary"] = merged_df["summary"].swifter.apply(preprocess_text)
merged_df.head()
# ---
# **Handling abbreviations:
# We can apply web scraping to get a predefined abbreviations dictionary**
import requests
from bs4 import BeautifulSoup


def find_abbreviations_online(dictionary_url):
    response = requests.get(dictionary_url)
    soup = BeautifulSoup(response.text, "html.parser")
    abbreviations = []
    meanings = []
    # this example assumes the abbreviations and meanings are in separate HTML elements
    abbreviation_elements = soup.find_all("span", class_="abbreviation")
    meaning_elements = soup.find_all("span", class_="meaning")
    for abb_element, meaning_element in zip(abbreviation_elements, meaning_elements):
        abbreviation = abb_element.get_text().strip()
        meaning = meaning_element.get_text().strip()
        abbreviations.append(abbreviation)
        meanings.append(meaning)
    return abbreviations, meanings


# Acronym Finder
dictionary_url = "https://www.acronymfinder.com/"
abbreviations, meanings = find_abbreviations_online(dictionary_url)
for abbreviation, meaning in zip(abbreviations, meanings):
    print(f"{abbreviation}: {meaning}")
# **This part of the code is still under development.**
# ---
# # Sentiment Analysis
# #### We can apply sentiment analysis techniques to the preprocessed text data to classify the sentiment of each review. This can involve using pre-trained sentiment analysis models or building our own classifier using machine learning or deep learning algorithms. The sentiment analysis can assign labels such as positive, negative, or neutral to each review.
# There are multiple approaches we can take:
# 1. **Using Pre-trained Sentiment Analysis Models:**
# - **The VADER (Valence Aware Dictionary and sEntiment Reasoner)** model.
# - **TextBlob:** it uses a pre-trained model to perform sentiment analysis and provides polarity scores ranging from -1 to +1.
# - **Hugging Face Transformers:** models like BERT, RoBERTa, and DistilBERT can be fine-tuned on sentiment analysis datasets to create our own sentiment classifier (see the sketch right after this list).
# - **Stanford NLP Sentiment Analysis:** Stanford NLP provides a pre-trained sentiment analysis model based on Recursive Neural Tensor Networks; it assigns sentiment labels such as very negative, negative, neutral, positive, and very positive to text.
# - **IBM Watson Natural Language Understanding:** IBM Watson offers a pre-trained sentiment analysis model as part of their Natural Language Understanding service.
# - **Google Cloud Natural Language API:** it supports sentiment analysis for multiple languages and provides sentiment scores ranging from -1 to +1.
# 2. **Building a Sentiment Classifier:** training our own classifier using machine learning or deep learning algorithms.
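# **A sketch of the Hugging Face option (assuming the `transformers` package is installed and a default English sentiment model can be downloaded; this is illustrative and not used by the rest of the notebook):**
from transformers import pipeline

sentiment_pipe = pipeline("sentiment-analysis")  # loads a default pre-trained model
print(sentiment_pipe("I love this product!"))
# expected output shape: [{'label': 'POSITIVE', 'score': 0.99...}]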
# #### VADER
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer

nltk.download("vader_lexicon")
# VADER sentiment analyzer
sia = SentimentIntensityAnalyzer()


def analyze_sentiment(text):
    """
    Analyze the sentiment of a given text using the VADER sentiment analyzer.
    """
    sentiment = sia.polarity_scores(text)
    return sentiment["compound"]


def categorize_sentiment(score):
    """
    Categorize the sentiment score into sentiment labels.
    """
    if score > 0.05:
        return "positive"
    elif score < -0.05:
        return "negative"
    else:
        return "neutral"


# perform sentiment analysis on review text to generate sentiment scores
merged_df["sentiment_score"] = merged_df["reviewText"].apply(analyze_sentiment)
# categorize sentiment scores into labels (e.g., positive, negative, neutral)
merged_df["sentiment_label"] = merged_df["sentiment_score"].apply(categorize_sentiment)
# count the number of reviews for each sentiment label
sentiment_counts = merged_df["sentiment_label"].value_counts()
plt.pie(sentiment_counts, labels=sentiment_counts.index, autopct="%1.1f%%")
plt.title("Sentiment Distribution")
plt.show()
merged_df.head(1)
filtered_df = merged_df[merged_df["category"] == "software"]
filtered_df.to_csv("/kaggle/working/filtered_dataset.csv", index=False)
filtered_df.shape
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

rating_labels = merged_df["overall"].apply(
    lambda x: "positive" if x >= 3 else "negative"
)
vader_labels = merged_df["sentiment_label"]
# note: vader_labels also contains a 'neutral' class that rating_labels never produces,
# so every neutral prediction counts as a mismatch and these scores understate agreement
accuracy = accuracy_score(rating_labels, vader_labels)
print("Overall Sentiment Accuracy:", accuracy)
# Evaluate precision, recall, and F1-score
# with average="weighted", per-class scores are averaged, weighted by class support
precision = precision_score(rating_labels, vader_labels, average="weighted")
recall = recall_score(rating_labels, vader_labels, average="weighted")
f1 = f1_score(rating_labels, vader_labels, average="weighted")
print("Weighted Precision:", precision)
print("Weighted Recall:", recall)
print("Weighted F1-Score:", f1)
# **The pie chart indicates an overall high level of satisfaction among customers.**
positive_reviews = merged_df[merged_df["sentiment_label"] == "positive"]["reviewText"]
sample_positive_reviews = positive_reviews.sample(n=5)
for review in sample_positive_reviews:
    print(review)
    print("---")
negative_reviews = merged_df[merged_df["sentiment_label"] == "negative"]["reviewText"]
sample_negative_reviews = negative_reviews.sample(n=5)
for review in sample_negative_reviews:
    print(review)
    print("---")
neutral_reviews = merged_df[merged_df["sentiment_label"] == "neutral"]["reviewText"]
sample_neutral_reviews = neutral_reviews.sample(n=5)
for review in sample_neutral_reviews:
    print(review)
    print("---")
merged_df.head()
columns = ["sentiment_score", "sentiment_label"]
new_df = merged_df.drop(columns=columns, axis=1)
# **Word Cloud**
from wordcloud import WordCloud


def show_wordcloud(data, title=None):
    # join all texts into one string for the word cloud
    wordcloud = WordCloud(
        background_color="white",
        max_words=200,
        max_font_size=40,
        scale=3,
        random_state=42,
    ).generate(" ".join(data.astype(str)))
    fig = plt.figure(1, figsize=(20, 20))
    plt.axis("off")
    if title:
        fig.suptitle(title, fontsize=20)
        fig.subplots_adjust(top=2.3)
    plt.imshow(wordcloud)
    plt.show()


# **Positive Reviews**
positiveReviews_df = merged_df.loc[merged_df["sentiment_label"] == "positive"]
show_wordcloud(positiveReviews_df["reviewText"])
# **Negative Reviews**
negativeReviews_df = merged_df.loc[merged_df["sentiment_label"] == "negative"]
show_wordcloud(negativeReviews_df["reviewText"])
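# **For completeness, the same visualization for the neutral reviews:**
neutralReviews_df = merged_df.loc[merged_df["sentiment_label"] == "neutral"]
show_wordcloud(neutralReviews_df["reviewText"])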
# ### TextBlob
from textblob import TextBlob
import pandas as pd

# Apply sentiment analysis using TextBlob
new_df["sentiment"] = new_df["reviewText"].apply(
    lambda text: TextBlob(text).sentiment.polarity
)
# Classify sentiment labels based on polarity scores
new_df["sentiment_label"] = new_df["sentiment"].apply(
    lambda score: "Positive" if score > 0 else "Negative" if score < 0 else "Neutral"
)
# Print the updated dataset
new_df[["reviewText", "sentiment", "sentiment_label"]]
# count the number of reviews for each sentiment label
sentiment_counts = new_df["sentiment_label"].value_counts()
plt.pie(sentiment_counts, labels=sentiment_counts.index, autopct="%1.1f%%")
plt.title("Sentiment Distribution")
plt.show()
# ---
# ### Hugging Face Transformers
# **Fine-tuning a BERT model for sentiment analysis**
# columns = ['sentiment_score', 'sentiment_label']
# new_df = merged_df.drop(columns= columns, axis=1)
# import torch
# from transformers import BertTokenizer, BertForSequenceClassification
# from torch.utils.data import DataLoader, RandomSampler
# from transformers import AdamW
# Load the tokenizer and BERT model
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)
# Preprocess the dataset
# Assuming you have already loaded your dataset and split it into train and test sets
# train_texts = train_dataset['reviewText'].tolist()
# train_labels = train_dataset['overall'].tolist()
# test_texts = test_dataset['reviewText'].tolist()
# test_labels = test_dataset['overall'].tolist()
# Tokenize the texts
# train_encodings = tokenizer(train_texts, truncation=True, padding=True)
# test_encodings = tokenizer(test_texts, truncation=True, padding=True)
# ---
# # Feature Extraction
# **CountVectorizer**
# To analyze the text data and build a vocabulary of unique words. The resulting vocabulary can be used to convert text documents into a numerical representation suitable for machine learning algorithms.
from sklearn.feature_extraction.text import CountVectorizer

# features = CountVectorizer()
# features.fit(merged_df["reviewText"])
# print(len(features.vocabulary_))
# print(features.vocabulary_)
# bagofWords = features.transform(merged_df["reviewText"])
# print(bagofWords)
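# **Since the CountVectorizer cells above are commented out, here is a tiny self-contained sketch of what it produces (the three example sentences are invented):**
toy_corpus = [
    "great product works well",
    "terrible product broke fast",
    "works great",
]
toy_vectorizer = CountVectorizer()
toy_bow = toy_vectorizer.fit_transform(toy_corpus)
print(toy_vectorizer.get_feature_names_out())  # the learned vocabulary
print(toy_bow.toarray())  # one word-count row per document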
<jupyter_start><jupyter_text>IMDB Top 250 Movies
This dataset contains the data of the top 250 movies as per their IMDB rating listed on the official website of IMDB
**Features**
- rank - Movie Rank as per IMDB rating
- movie_id - Movie ID
- title - Name of the Movie
- year - Year of Movie release
- link - URL for the Movie
- imdb_votes - Number of people who voted for the IMDB rating
- imdb_rating - Rating of the Movie
- certificate - Movie Certification
- duration - Duration of the Movie
- genre - Genre of the Movie
- cast_id - ID of the cast members who have worked on the Movie
- cast_name - Names of the cast members who have worked on the Movie
- director_id - ID of the director who directed the Movie
- director_name - Name of the director who directed the Movie
- writer_id - ID of the writer who wrote the script for the Movie
- writer_name - Name of the writer who wrote the script for the Movie
- storyline - Storyline of the Movie
- user_id - ID of the user who wrote a review for the Movie
- user_name - Name of the user who wrote a review for the Movie
- review_id - ID of the user review
- review_title - Short review
- review_content - Long review
**Inspiration**
IMDB is one of the main sources people use to judge a movie or show. The IMDB rating plays an important role for a lot of people deciding what to watch. I watched The Shawshank Redemption after finding out that it's at the top of the list on IMDB. I've created this dataset so that people can play with it and do a lot of things, as mentioned below:
- Dataset Walkthrough
- Understanding Dataset Hierarchy
- Data Preprocessing
- Exploratory Data Analysis
- Data Visualization
- Making a Recommendation System
This is a list of some of the things you can do with this dataset. It's definitely not limited to what is mentioned here; a lot of other things can also be done.
Kaggle dataset identifier: imdb-top-250-movies
<jupyter_script>
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd

pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns

print("Setup Complete")
movies_data = pd.read_csv("/kaggle/input/imdb-top-250-movies/movies.csv")
# # Initial Exploration
# get column names
list(movies_data.columns)
# get number of columns
movies_data.shape[1]
# summarize data
movies_data.describe()
movies_data.head()
# # Cleaning Data
# Remove columns with unnecessary data like links, names, excessively long ids, and complete movie reviews
# (assign the result back so the dropped columns are actually removed)
movies_data = movies_data.drop(
    [
        "link",
        "writer_id",
        "user_id",
        "director_id",
        "user_name",
        "cast_id",
        "cast_name",
        "review_id",
        "review_title",
        "review_content",
        "storyline",
    ],
    axis=1,
)
# # Summarizing Data
# Return the mean of ratings by the year a movie was released
tbl = (
    movies_data.groupby("year")
    .agg(mean_rating=("imbd_rating", "mean"), count=("year", "count"))
    .reset_index()
)
print(tbl)
# # Visualization
# draw a plot with the number of "imbd_votes" as the x axis and "imbd_rating" as the y axis,
# add a fitted regression line (lm = linear model)
# (to show a correlation between popularity and rating)
sns.regplot(x=movies_data["imbd_votes"], y=movies_data["imbd_rating"])
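# To put a number on the relationship shown in the plot, we can also
# compute the Pearson correlation coefficient (a small addition that
# reuses the same two columns as the plot above)
corr = movies_data["imbd_votes"].corr(movies_data["imbd_rating"])
print(f"Pearson correlation between votes and rating: {corr:.3f}")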
FILE_DIR = "/kaggle/input/titanic"  # gender_submission.csv , test.csv , train.csv

import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import os

print(tf.__version__)

df_train = pd.read_csv(os.path.join(FILE_DIR, "train.csv"))
df_test = pd.read_csv(os.path.join(FILE_DIR, "test.csv"))

# Encode the Sex column as integer category codes.
df_train["Sex"] = pd.Categorical(df_train["Sex"])
df_train["Sex"] = df_train.Sex.cat.codes
df_test["Sex"] = pd.Categorical(df_test["Sex"])
df_test["Sex"] = df_test.Sex.cat.codes

features_selected = ["Sex", "SibSp", "Parch", "Fare", "Age"]

# Fill missing ages with the training-set mean (the test set also uses the
# training mean, so no information leaks from the test data).
df_train.Age.fillna(df_train.Age.mean(), inplace=True)
df_test.Age.fillna(df_train.Age.mean(), inplace=True)


def pie(data, labels):
    fig, ax = plt.subplots()
    ax.pie(data, labels=labels)
    plt.show()


def df_to_dataset(data, with_labels=True, shuffle=True, batch_size=32):
    # Create a tf.data.Dataset from the dataframe and labels.
    if with_labels:
        ds = tf.data.Dataset.from_tensor_slices((dict(data[0]), data[1]))
        if shuffle:
            # Shuffle dataset; the buffer must cover the number of rows,
            # not the length of the (features, labels) tuple.
            ds = ds.shuffle(len(data[0]))
        # Batch dataset with specified batch_size parameter.
        ds = ds.batch(batch_size)
        return ds
    ds = tf.data.Dataset.from_tensor_slices((dict(data)))
    return ds.batch(batch_size)


# from sklearn.model_selection import train_test_split

train = df_train[features_selected]
y_train = df_train.Survived
test = df_test[features_selected]

# split = int(0.5 * len(train))
# val = train[split:]
# y_val = y_train[split:]
# train = train[:split]
# y_train = y_train[:split]

train_ds = df_to_dataset((train, y_train), batch_size=16)
test_ds = df_to_dataset(test, with_labels=False, batch_size=16)
# val_ds = df_to_dataset(val, y_val, shuffle=False, batch_size=16)

Age = tf.feature_column.numeric_column("Age")
Fare = tf.feature_column.numeric_column("Fare")
boundaries_Age = [5, 12, 18, 30, 40, 60, 80]
boundaries_Fare = [10, 20, 30, 40, 60, 80, 100]
BucketAge = tf.feature_column.bucketized_column(Age, boundaries_Age)
BucketFare = tf.feature_column.bucketized_column(Fare, boundaries_Fare)

FeatureColumn = []
for feature in features_selected:
    numeric_feature_column = tf.feature_column.numeric_column(feature)
    FeatureColumn.append(numeric_feature_column)

FeatureColumn.append(BucketAge)
FeatureColumn.append(BucketFare)

model = tf.keras.Sequential(
    [
        tf.keras.layers.DenseFeatures(FeatureColumn),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ]
)

model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

history = model.fit(
    train_ds,
    # validation_data=val_ds,
    epochs=500,
    verbose=0,
)

acc = history.history["accuracy"]
# val_acc = history.history['val_accuracy']
loss = history.history["loss"]
# val_loss = history.history['val_loss']

# Note: with no validation set, this is training accuracy.
print(f"accuracy = {acc[-1]}")

fig, ax = plt.subplots(2)
ax[0].plot(acc)
# ax[0].plot(val_acc)
ax[1].plot(loss)
# ax[1].plot(val_loss)
plt.show()

import numpy as np

pred_raw = model.predict(test_ds)
pred = np.array(pred_raw > 0.5, dtype=int).flatten()
print(len(pred))

submit = pd.read_csv(os.path.join(FILE_DIR, "gender_submission.csv"))
my_submission = submit.copy()

# Pie charts of the class balance; the labels follow value_counts() order.
pie(submit.Survived.value_counts(), ["S", "M"])
my_submission.Survived = pred
pie(my_submission.Survived.value_counts(), ["S", "M"])

my_submission.to_csv("my_submission.csv", index=False)
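# A minimal sanity check on the input pipeline above (assuming `train_ds` as built):
# pull one batch and print each feature tensor's shape and dtype before training.
for features, labels in train_ds.take(1):
    for name, tensor in features.items():
        print(name, tensor.shape, tensor.dtype)
    print("labels", labels.shape)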
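# The commented-out split in the notebook above hints at holding out a validation
# set; a minimal sketch of that idea (variable names here are illustrative, not
# from the original notebook):
from sklearn.model_selection import train_test_split

tr_X, val_X, tr_y, val_y = train_test_split(
    train, y_train, test_size=0.2, random_state=0
)
val_ds = df_to_dataset((val_X, val_y), shuffle=False, batch_size=16)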
<jupyter_start><jupyter_text>Starbucks Nutrition Facts
```
Nutrition facts for several Starbucks food items
```
| Column   | Description                                                  |
| -------- | ------------------------------------------------------------ |
| item     | The name of the food item.                                   |
| calories | The amount of calories in the food item.                     |
| fat      | The quantity of fat in grams present in the food item.       |
| carb     | The amount of carbohydrates in grams found in the food item. |
| fiber    | The quantity of dietary fiber in grams in the food item.     |
| protein  | The amount of protein in grams contained in the food item.   |
| type     | The category or type of food item (bakery, bistro box, hot breakfast, parfait, petite, salad, or sandwich). |
Kaggle dataset identifier: starbucks-nutrition
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from pprint import pprint

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# # Starbucks Nutritional Information
# Starbucks provides comprehensive nutritional information for their food and beverage offerings, allowing customers to make informed choices based on their dietary preferences and health goals. The nutritional information includes details such as calories, fat content, carbohydrates, fiber, and protein for each menu item.
# By making this information readily available, Starbucks aims to empower individuals to make choices that align with their nutritional needs and preferences. Whether you're looking for lower-calorie options, watching your fat or carb intake, or seeking protein-rich alternatives, the nutritional information provided by Starbucks helps you navigate their menu with confidence.

# ## Data Coverage
# The data encompasses a range of food items, from baked goods and bistro boxes to hot breakfast items, parfaits, petite treats, salads, and sandwiches. Each item is categorized based on its type, making it easier for customers to find options that suit their dietary requirements or preferences.

# ## Transparency and Informed Decisions
# By offering transparent and detailed nutritional information, Starbucks reinforces its commitment to supporting customers in making informed decisions about their food choices. Whether you're enjoying a coffee break or grabbing a quick bite, the nutritional information empowers you to enjoy Starbucks' offerings while being mindful of your nutritional goals.

# ---
# ## Data Dictionary
# The data consists of nutrition facts for several Starbucks food items. It is organized in the form of a data frame with 77 observations and 7 variables.

# ### Variables
# - **item**: The name of the food item (string).
# - **calories**: The number of calories in the food item (integer).
# - **fat**: The amount of fat in grams (numeric).
# - **carb**: The amount of carbohydrates in grams (numeric).
# - **fiber**: The amount of dietary fiber in grams (numeric).
# - **protein**: The amount of protein in grams (numeric).
# - **type**: The categorization of the food item, with levels bakery, bistro box, hot breakfast, parfait, petite, salad, and sandwich (factor).

# ### Additional Information
# - The data frame has a RangeIndex from 0 to 76.
# - There are no missing values (non-null count is 77 for all columns).
# - The original data frame had an additional column named "Unnamed: 0", which has been removed for this improved data dictionary.
# ---

df = pd.read_csv("/kaggle/input/starbucks-nutrition/starbucks.csv", index_col=0)
df

df.describe()  # call the method; a bare `df.describe` only displays the bound method
df.info()

# ---
# ## Questions to ask
# 1. What are the highest and lowest calorie food items offered by Starbucks?
# 2. Which food items have the highest amount of fat, carbohydrates, fiber, and protein?
# 3. Are there any food items that are particularly rich in fiber but low in fat and carbohydrates?
# 4. What is the average calorie content of each food item type (bakery, bistro box, hot breakfast, etc.)?
# 5. Is there a correlation between the calorie content and the amount of fat, carbohydrates, fiber, or protein in the food items?
# 6. Which food item types have the highest average fat, carbohydrate, fiber, and protein content?
# 7. Can we identify any trends or patterns in the nutritional composition of Starbucks food items?
# 8. Are there any notable differences in the nutritional profile of food items across different categories (bakery, bistro box, etc.)?
# 9. Are there any food items that provide a good balance of macronutrients (fat, carbohydrates, and protein)?
# 10. Can we identify any outliers or unusual values in the nutritional information?
# ---

# ## 1. What are the highest and lowest calorie food items offered by Starbucks?

# Find the highest calorie food item
highest_calorie_item = df.loc[df["calories"].idxmax(), "item"]
highest_calorie_value = df["calories"].max()

# Find the lowest calorie food item
lowest_calorie_item = df.loc[df["calories"].idxmin(), "item"]
lowest_calorie_value = df["calories"].min()

# Print the results
print(
    f"Highest Calorie Food Item: {highest_calorie_item} ({highest_calorie_value} calories)"
)
print(
    f"Lowest Calorie Food Item: {lowest_calorie_item} ({lowest_calorie_value} calories)"
)

# ## 2. Which food items have the highest amount of fat, carbohydrates, fiber, and protein?

# Find the food item with the highest amount of fat
highest_fat_item = df.loc[df["fat"].idxmax(), "item"]
highest_fat_value = df["fat"].max()

# Find the food item with the highest amount of carbohydrates
highest_carb_item = df.loc[df["carb"].idxmax(), "item"]
highest_carb_value = df["carb"].max()

# Find the food item with the highest amount of fiber
highest_fiber_item = df.loc[df["fiber"].idxmax(), "item"]
highest_fiber_value = df["fiber"].max()

# Find the food item with the highest amount of protein
highest_protein_item = df.loc[df["protein"].idxmax(), "item"]
highest_protein_value = df["protein"].max()

# Print the results
print(f"Food Item with Highest Fat: {highest_fat_item} ({highest_fat_value} grams)")
print(
    f"Food Item with Highest Carbohydrates: {highest_carb_item} ({highest_carb_value} grams)"
)
print(
    f"Food Item with Highest Fiber: {highest_fiber_item} ({highest_fiber_value} grams)"
)
print(
    f"Food Item with Highest Protein: {highest_protein_item} ({highest_protein_value} grams)"
)

# ## 3. Are there any food items that are particularly rich in fiber but low in fat and carbohydrates?
# Filter food items with at least some fiber (> 0 g), low fat (< 5 g),
# and low carbohydrates (< 30 g)
filtered_items = df[(df["fiber"] > 0) & (df["fat"] < 5) & (df["carb"] < 30)]

# Print the filtered food items using f-strings
if filtered_items.empty:
    print(
        "No food items are particularly rich in fiber but low in fat and carbohydrates."
    )
else:
    print(
        "Food items that are particularly rich in fiber but low in fat and carbohydrates:"
    )
    for item in filtered_items["item"]:
        print(f"- {item}")

# ## 4. What is the average calorie content of each food item type (bakery, bistro box, hot breakfast, etc.)?

# Calculate the average calorie content of each food item type
average_calories = df.groupby("type")["calories"].mean()

# Print the average calorie content using f-strings
print("Average Calorie Content by Food Item Type:")
for food_type, avg_calories in average_calories.items():
    print(f"- {food_type}: {avg_calories:.2f} calories")
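# ## 5. Is there a correlation between the calorie content and the macronutrients?
# A minimal sketch answering question 5 from the list above, assuming the same `df`:
corr = df[["calories", "fat", "carb", "fiber", "protein"]].corr()
print(corr["calories"].drop("calories").round(3))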
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

df = pd.read_csv("/kaggle/input/playground-series-s3e15/data.csv")
df.head()
df.describe()
df.isna().sum()
df.shape
df.hist(figsize=(30, 20))
df.columns

x_eNull = df[df["x_e_out [-]"].isna()]
x_eNull.head()
x_eNull.isna().sum()
x_eNull.hist(figsize=(30, 20))

# Impute the null values in the other columns
import miceforest as mf

df.dtypes

from sklearn.preprocessing import OneHotEncoder

ohe = OneHotEncoder()
transformed = ohe.fit_transform(df[["geometry"]])
df[ohe.categories_[0]] = transformed.toarray()
df.head()

ohe = OneHotEncoder()
transformed = ohe.fit_transform(df[["author"]])
df[ohe.categories_[0]] = transformed.toarray()
df.head()

# Drop the raw categorical columns, plus the NaN column the encoder created
# for missing author/geometry values.
df.drop(["author", "geometry", np.nan], axis=1, inplace=True)

df = df.rename(
    columns={
        "pressure [MPa]": "pressure",
        "mass_flux [kg/m2-s]": "mass_flux",
        "x_e_out [-]": "x_e_out",
        "D_e [mm]": "D_e",
        "D_h [mm]": "D_h",
        "length [mm]": "length",
        "chf_exp [MW/m2]": "chf_exp",
    }
)

# Create kernel.
kds = mf.ImputationKernel(df, save_all_iterations=True, random_state=1991)
# Run the MICE algorithm for 3 iterations
kds.mice(3)
# Return the completed kernel data
completed_data = kds.complete_data()
completed_data.isna().sum()

x_eNull.drop(["x_e_out [-]"], axis=1, inplace=True)
completed_data.head()

submission = pd.merge(x_eNull, completed_data[["id", "x_e_out"]], on="id")
submission.head()
submission = submission[["id", "x_e_out"]]
submission.to_csv("submission.csv", index=False)

# df was one-hot encoded and renamed above, so slice the rows with and without
# the target directly from it; this keeps both subsets on the same columns
# (the original notebook re-encoded each subset separately, which fails once
# the raw categorical columns have been dropped from df).
x_eNotNull = df[df["x_e_out"].notna()].copy()
x_eNull = df[df["x_e_out"].isna()].copy()

import seaborn as sns

df.columns
# sns.heatmap(df[['id','pressure', 'mass_flux', 'x_e_out', 'D_e', 'D_h', 'length', 'chf_exp']])
sns.heatmap(
    df[["id", "pressure", "mass_flux", "x_e_out", "D_e", "D_h", "length", "chf_exp"]],
    linewidths=0.30,
    annot=True,
)

x_eNotNull.head()
x_eNotNull.hist(figsize=(30, 20))

# The feature distributions in the not-null dataframe and the null dataframe look very similar.
# We can train on the not-null rows and use the null rows as the test dataset.
df.dtypes

# The categorical columns (author, geometry) were already one-hot encoded above.
dfDropped = df.drop(["D_e", "D_h", "length"], axis=1)
dfDropped.head()

X = dfDropped[dfDropped["x_e_out"].notna()]
test = dfDropped[dfDropped["x_e_out"].isna()]

# Build the final training matrices (these overwrite the exploratory X/test above).
X = x_eNotNull.drop(["x_e_out", "id"], axis=1)
y = x_eNotNull.loc[:, x_eNotNull.columns == "x_e_out"]

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=42
)

test = x_eNull.drop(["x_e_out", "id"], axis=1)
test.head()

import optuna
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_squared_error


def objective(trial):
    """
    Objective function to be minimized.
    """
    param = {
        "objective": "regression",
        "metric": "rmse",
        "verbosity": -1,
        "boosting_type": "gbdt",
        "lambda_l1": trial.suggest_float("lambda_l1", 1e-8, 10.0, log=True),
        "lambda_l2": trial.suggest_float("lambda_l2", 1e-8, 10.0, log=True),
        "num_leaves": trial.suggest_int("num_leaves", 2, 256),
        "feature_fraction": trial.suggest_float("feature_fraction", 0.4, 1.0),
        "bagging_fraction": trial.suggest_float("bagging_fraction", 0.4, 1.0),
        "bagging_freq": trial.suggest_int("bagging_freq", 1, 7),
        "min_child_samples": trial.suggest_int("min_child_samples", 5, 100),
    }

    gbm = LGBMRegressor(**param)
    gbm.fit(X_train, y_train)
    preds = gbm.predict(X_test)
    rmse = mean_squared_error(y_test, preds, squared=False)
    return rmse


study = optuna.create_study()
study.optimize(objective, n_trials=50)
print("Number of finished trials:", len(study.trials))
print("Best trial:", study.best_trial.params)

import lightgbm as lgb
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold

# X = X_train.copy()
# y = X.pop('x_e_out')

seed = 42
splits = 10

# Initialize KFold cross-validation
kf = KFold(n_splits=splits, shuffle=True, random_state=42)

val_preds = np.zeros(len(X))
val_scores = []
prediction = np.zeros((len(test)))

params = {
    "objective": "regression",
    "metric": "rmse",
    "lambda_l1": 8.086864003164224e-05,
    "lambda_l2": 0.7435736341285621,
    "num_leaves": 164,
    "feature_fraction": 0.9707722414736359,
    "bagging_fraction": 0.4910694147596062,
    "bagging_freq": 1,
    "min_child_samples": 83,
}

from sklearn.preprocessing import RobustScaler

# Loop through each fold
for fold, (train_idx, val_idx) in enumerate(kf.split(X, y)):
    # Split data into training and validation sets
    X_train, y_train = X.iloc[train_idx], y.iloc[train_idx]
    X_val, y_val = X.iloc[val_idx], y.iloc[val_idx]

    train_data = lgb.Dataset(X_train, label=y_train)
    val_data = lgb.Dataset(X_val, label=y_val)

    # Train model with early stopping
    model = lgb.train(
        params,
        train_data,
        valid_sets=[train_data, val_data],
        early_stopping_rounds=7000,
        verbose_eval=False,
    )

    # Make validation predictions and calculate validation score
    val_preds[val_idx] += model.predict(X_val)
    val_score = mean_squared_error(y_val, val_preds[val_idx], squared=False)
    val_scores.append(val_score)

    # Print validation score
    print(f"Fold {fold+1}: Validation score: {val_score:.4f}")

    prediction += model.predict(test)

avg_val_score = np.mean(val_scores)
print(f"Average validation score: {avg_val_score:.4f}")

prediction /= splits

from lightgbm import plot_importance

plot_importance(model, figsize=(10, 9))


# custom function to run a LightGBM model
def run_lgb(train_X, train_y, val_X, val_y, test_X):
    params = {
        "objective": "regression",
        "metric": "rmse",
        "lambda_l1": 1.2619723670327868e-07,
        "lambda_l2": 9.721462975369603,
        "num_leaves": 41,
        "feature_fraction": 0.7697328380987326,
        "bagging_fraction": 0.7132023581183115,
        "bagging_freq": 6,
        "min_child_samples": 37,
    }
    lgtrain = lgb.Dataset(train_X, label=train_y)
    lgval = lgb.Dataset(val_X, label=val_y)
    # cv_lgb = lgb.cv(params, lgtrain, num_boost_round=700, nfold=3,
    #                 verbose_eval=20, early_stopping_rounds=40)
    model = lgb.train(
        params,
        lgtrain,
        7000,
        valid_sets=[lgval],
        early_stopping_rounds=100,
        verbose_eval=100,
    )
    # Predict on the held-out features passed in as test_X (the original
    # version referenced the global x_eNull here instead of the parameter).
    pred_test_y = model.predict(test_X, num_iteration=model.best_iteration)
    pred_val_y = model.predict(val_X, num_iteration=model.best_iteration)
    return pred_test_y, model, pred_val_y


# Training the model
# pred_test, model, pred_val = run_lgb(X_train, y_train, X_test, y_test, test)

print(prediction)

# Write the CV-averaged predictions under the competition's original column name.
x_eNull["x_e_out [-]"] = prediction
submit = x_eNull[["id", "x_e_out [-]"]]
submit.to_csv("imputation_submit_5.csv", index=False)
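# A short follow-up sketch (assuming the `study` object created above): build the
# cross-validation parameter dict from the tuned trial instead of hard-coding values.
tuned_params = {"objective": "regression", "metric": "rmse", **study.best_trial.params}
print(tuned_params)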
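# Another small sketch (assuming `val_preds`, `y`, and `mean_squared_error` from
# the CV loop above): the overall out-of-fold RMSE, computed on all rows at once,
# is usually a steadier summary than averaging per-fold scores.
oof_rmse = mean_squared_error(y, val_preds, squared=False)
print(f"OOF RMSE: {oof_rmse:.4f}")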
<jupyter_start><jupyter_text>Netflix TV Shows and Movies
## **Netflix - TV Shows and Movies**
> This data set was created to list all shows available on Netflix streaming, and analyze the data to find interesting facts. This data was acquired in July 2022 containing data available in the United States.

## **Content**
> This dataset has two files containing the titles (**titles.csv**) and the cast (**credits.csv**) for the title.

> This dataset contains **+5k** unique **titles on Netflix** with 15 columns containing their information, including:
> - id: The title ID on JustWatch.
> - title: The name of the title.
> - show_type: TV show or movie.
> - description: A brief description.
> - release_year: The release year.
> - age_certification: The age certification.
> - runtime: The length of the episode (SHOW) or movie.
> - genres: A list of genres.
> - production_countries: A list of countries that produced the title.
> - seasons: Number of seasons if it's a SHOW.
> - imdb_id: The title ID on IMDB.
> - imdb_score: Score on IMDB.
> - imdb_votes: Votes on IMDB.
> - tmdb_popularity: Popularity on TMDB.
> - tmdb_score: Score on TMDB.

> And **over +77k** credits of **actors and directors** on Netflix titles with 5 columns containing their information, including:
> - person_ID: The person ID on JustWatch.
> - id: The title ID on JustWatch.
> - name: The actor or director's name.
> - character_name: The character name.
> - role: ACTOR or DIRECTOR.

## **Tasks**
> - Developing a content-based recommender system using the genres and/or descriptions.
> - Identifying the main content available on the streaming.
> - Network analysis on the cast of the titles.
> - Exploratory data analysis to find interesting insights.

## **Other Streaming Datasets**
> - [HBO Max TV Shows and Movies](https://www.kaggle.com/datasets/victorsoeiro/hbo-max-tv-shows-and-movies?select=titles.csv)
> - [Amazon Prime TV Shows and Movies](https://www.kaggle.com/datasets/victorsoeiro/amazon-prime-tv-shows-and-movies?select=titles.csv)
> - [Disney+ TV Shows and Movies](https://www.kaggle.com/datasets/victorsoeiro/disney-tv-shows-and-movies?select=titles.csv)
> - [Hulu TV Shows and Movies](https://www.kaggle.com/datasets/victorsoeiro/hulu-tv-shows-and-movies?select=titles.csv)
> - [Paramount TV Shows and Movies](https://www.kaggle.com/datasets/victorsoeiro/paramount-tv-shows-and-movies?select=titles.csv)
> - [Rakuten Viki TV Dramas and Movies](https://www.kaggle.com/datasets/victorsoeiro/rakuten-tv-dramas-and-movies?select=titles.csv)
> - [Crunchyroll Animes and Movies](https://www.kaggle.com/datasets/victorsoeiro/crunchyroll-animes-and-movies)
> - [Dark Matter TV Shows and Movies](https://www.kaggle.com/datasets/victorsoeiro/dark-matter-tv-shows-and-movies)

## **How to obtain the data**
> If you want to see how I obtained these data, please check my [GitHub repository](https://github.com/victor-soeiro/WebScraping-Projects/tree/main/justwatch).

## **Acknowledgements**
> All data were collected from [JustWatch](https://www.justwatch.com/us).
Kaggle dataset identifier: netflix-tv-shows-and-movies
<jupyter_code>import pandas as pd

df = pd.read_csv('netflix-tv-shows-and-movies/titles.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 5850 entries, 0 to 5849
Data columns (total 15 columns):
 #   Column                Non-Null Count  Dtype
---  ------                --------------  -----
 0   id                    5850 non-null   object
 1   title                 5849 non-null   object
 2   type                  5850 non-null   object
 3   description           5832 non-null   object
 4   release_year          5850 non-null   int64
 5   age_certification     3231 non-null   object
 6   runtime               5850 non-null   int64
 7   genres                5850 non-null   object
 8   production_countries  5850 non-null   object
 9   seasons               2106 non-null   float64
 10  imdb_id               5447 non-null   object
 11  imdb_score            5368 non-null   float64
 12  imdb_votes            5352 non-null   float64
 13  tmdb_popularity       5759 non-null   float64
 14  tmdb_score            5539 non-null   float64
dtypes: float64(5), int64(2), object(8)
memory usage: 685.7+ KB
<jupyter_text>Examples:
{
  "id": "ts300399",
  "title": "Five Came Back: The Reference Films",
  "type": "SHOW",
  "description": "This collection includes 12 World War II-era propaganda films \u2014 many of which are graphic and offensive \u2014 discussed in the docuseries \"Five Came Back.\"",
  "release_year": 1945,
  "age_certification": "TV-MA",
  "runtime": 51,
  "genres": "['documentation']",
  "production_countries": "['US']",
  "seasons": 1.0,
  "imdb_id": null,
  "imdb_score": NaN,
  "imdb_votes": NaN,
  "tmdb_popularity": 0.6000000000000001,
  "tmdb_score": NaN
}
{
  "id": "tm84618",
  "title": "Taxi Driver",
  "type": "MOVIE",
  "description": "A mentally unstable Vietnam War veteran works as a night-time taxi driver in New York City where the perceived decadence and sleaze feed his urge for violent action.",
  "release_year": 1976,
  "age_certification": "R",
  "runtime": 114,
  "genres": "['drama', 'crime']",
  "production_countries": "['US']",
  "seasons": NaN,
  "imdb_id": "tt0075314",
  "imdb_score": 8.2,
  "imdb_votes": 808582.0,
  "tmdb_popularity": 40.965,
  "tmdb_score": 8.179
}
{
  "id": "tm154986",
  "title": "Deliverance",
  "type": "MOVIE",
  "description": "Intent on seeing the Cahulawassee River before it's turned into one huge lake, outdoor fanatic Lewis Medlock takes his friends on a river-rafting trip they'll never forget into the dangerous American back-country.",
  "release_year": 1972,
  "age_certification": "R",
  "runtime": 109,
  "genres": "['drama', 'action', 'thriller', 'european']",
  "production_countries": "['US']",
  "seasons": NaN,
  "imdb_id": "tt0068473",
  "imdb_score": 7.7,
  "imdb_votes": 107673.0,
  "tmdb_popularity": 10.01,
  "tmdb_score": 7.3
}
{
  "id": "tm127384",
  "title": "Monty Python and the Holy Grail",
  "type": "MOVIE",
  "description": "King Arthur, accompanied by his squire, recruits his Knights of the Round Table, including Sir Bedevere the Wise, Sir Lancelot the Brave, Sir Robin the Not-Quite-So-Brave-As-Sir-Lancelot and Sir Galahad the Pure. On the way, Arthur battles the Black Knight who, despite having had...(truncated)",
  "release_year": 1975,
  "age_certification": "PG",
  "runtime": 91,
  "genres": "['fantasy', 'action', 'comedy']",
  "production_countries": "['GB']",
  "seasons": NaN,
  "imdb_id": "tt0071853",
  "imdb_score": 8.2,
  "imdb_votes": 534486.0,
  "tmdb_popularity": 15.461,
  "tmdb_score": 7.811
}
<jupyter_code>import pandas as pd

df = pd.read_csv('netflix-tv-shows-and-movies/credits.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 77801 entries, 0 to 77800
Data columns (total 5 columns):
 #   Column     Non-Null Count  Dtype
---  ------     --------------  -----
 0   person_id  77801 non-null  int64
 1   id         77801 non-null  object
 2   name       77801 non-null  object
 3   character  68029 non-null  object
 4   role       77801 non-null  object
dtypes: int64(1), object(4)
memory usage: 3.0+ MB
<jupyter_text>Examples:
{ "person_id": 3748, "id": "tm84618", "name": "Robert De Niro", "character": "Travis Bickle", "role": "ACTOR" }
{ "person_id": 14658, "id": "tm84618", "name": "Jodie Foster", "character": "Iris Steensma", "role": "ACTOR" }
{ "person_id": 7064, "id": "tm84618", "name": "Albert Brooks", "character": "Tom", "role": "ACTOR" }
{ "person_id": 3739, "id": "tm84618", "name": "Harvey Keitel", "character": "Matthew 'Sport' Higgins", "role": "ACTOR" }
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings

warnings.filterwarnings("ignore")

# # 1. Import Files and explore the datasets
df_credits = pd.read_csv("/kaggle/input/netflix-tv-shows-and-movies/credits.csv")
df_credits.head()
df_credits.info()
df_credits.isna().sum()

df_titles = pd.read_csv("/kaggle/input/netflix-tv-shows-and-movies/titles.csv")
df_titles.head()
df_titles.isna().sum()
df_titles.info()

# ## Explanation of the cleaning process
# * Ignore missing values for the following variables, as they have no implications for the analysis:
#   - description
#   - age_certification
#   - imdb_id
# * For the variable seasons, missing values will be replaced by 0, which corresponds to movies (a movie has no seasons, so this is logical).
# * Replace missing values in the following variables with the median, or with the mean when there are no outlier values: imdb_score, imdb_votes, tmdb_score and tmdb_popularity.
# * Correct the two variables 'genres' and 'production_countries' to remove the square brackets and quotes.
def clean_variables(dataframe):
    # Drop the single row with a missing title, then fill the numeric columns.
    dataframe.dropna(axis=0, subset=["title"], inplace=True)
    dataframe["imdb_score"] = dataframe["imdb_score"].fillna(
        value=dataframe["imdb_score"].mean()
    )
    dataframe["imdb_votes"] = dataframe["imdb_votes"].fillna(
        value=dataframe["imdb_votes"].median()
    )
    dataframe["tmdb_popularity"] = dataframe["tmdb_popularity"].fillna(
        value=dataframe["tmdb_popularity"].median()
    )
    dataframe["tmdb_score"] = dataframe["tmdb_score"].fillna(
        value=dataframe["tmdb_score"].median()
    )
    dataframe["seasons"] = dataframe["seasons"].fillna(value=0)


clean_variables(df_titles)

# Drop quotation marks and square brackets in 'genres'
df_titles["genres"] = df_titles["genres"].apply(
    lambda x: x.replace("[", "").replace("]", "")
)
df_titles["genres"] = df_titles["genres"].str.replace("'", "")

# Drop quotation marks and square brackets in 'production_countries'
df_titles["production_countries"] = df_titles["production_countries"].apply(
    lambda x: x.replace("[", "").replace("]", "")
)
df_titles["production_countries"] = df_titles["production_countries"].str.replace(
    "'", ""
)

# Drop imdb_id because it is not used in the analysis
df_titles.drop("imdb_id", axis=1, inplace=True)

df_titles
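# A quick sanity check on the cleaning above (assuming `df_titles` as just produced):
# only description and age_certification should still contain NaNs, and every
# MOVIE row should now have seasons == 0.
print(df_titles.isna().sum())
print(df_titles.loc[df_titles["type"] == "MOVIE", "seasons"].unique())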
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings

warnings.filterwarnings("ignore")

# # 1. Import files and explore the datasets
df_credits = pd.read_csv("/kaggle/input/netflix-tv-shows-and-movies/credits.csv")
df_credits.head()
df_credits.info()
df_credits.isna().sum()

df_titles = pd.read_csv("/kaggle/input/netflix-tv-shows-and-movies/titles.csv")
df_titles.head()
df_titles.isna().sum()
df_titles.info()

# ## Explanation of the cleaning process
# * Ignore missing values in the following variables, as they have no bearing on the analysis:
#   - description
#   - age_certification
#   - imdb_id
# * For the variable seasons, replace missing values with 0, which corresponds to movies (they logically have no seasons).
# * Replace missing values in imdb_score, imdb_votes, tmdb_score and tmdb_popularity with the median, or with the mean when there are no outlier values.
# * Clean the two variables 'genres' and 'production_countries' to remove the square brackets and quotes.


def clean_variables(dataframe):
    dataframe.dropna(axis=0, subset=["title"], inplace=True)
    dataframe["imdb_score"] = dataframe["imdb_score"].fillna(
        value=dataframe["imdb_score"].mean()
    )
    dataframe["imdb_votes"] = dataframe["imdb_votes"].fillna(
        value=dataframe["imdb_votes"].median()
    )
    dataframe["tmdb_popularity"] = dataframe["tmdb_popularity"].fillna(
        value=dataframe["tmdb_popularity"].median()
    )
    dataframe["tmdb_score"] = dataframe["tmdb_score"].fillna(
        value=dataframe["tmdb_score"].median()
    )
    dataframe["seasons"] = dataframe["seasons"].fillna(value=0)


clean_variables(df_titles)

# Drop quotation marks and square brackets in 'genres'
df_titles["genres"] = df_titles["genres"].apply(
    lambda x: x.replace("[", "").replace("]", "")
)
df_titles["genres"] = df_titles["genres"].str.replace("'", "", regex=False)

# Drop quotation marks and square brackets in 'production_countries'
df_titles["production_countries"] = df_titles["production_countries"].apply(
    lambda x: x.replace("[", "").replace("]", "")
)
df_titles["production_countries"] = df_titles["production_countries"].str.replace(
    "'", "", regex=False
)

# Drop imdb_id because it is not used
df_titles.drop(columns=["imdb_id"], inplace=True)
df_titles
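# A hedged sketch, not part of the original notebook: instead of stripping
# brackets and quotes as plain text, the stringified lists in 'genres' can be
# parsed into real Python lists with ast.literal_eval and exploded into one
# row per genre, which makes per-genre counts straightforward. The reload
# below is only so the sketch works on the raw, uncleaned column.
import ast

raw_titles = pd.read_csv("/kaggle/input/netflix-tv-shows-and-movies/titles.csv")
genre_counts = (
    raw_titles.assign(genre=raw_titles["genres"].apply(ast.literal_eval))
    .explode("genre")
    .groupby("genre")["id"]
    .count()
    .sort_values(ascending=False)
)
print(genre_counts.head(10))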
[{"netflix-tv-shows-and-movies/titles.csv": {"column_names": "[\"id\", \"title\", \"type\", \"description\", \"release_year\", \"age_certification\", \"runtime\", \"genres\", \"production_countries\", \"seasons\", \"imdb_id\", \"imdb_score\", \"imdb_votes\", \"tmdb_popularity\", \"tmdb_score\"]", "column_data_types": "{\"id\": \"object\", \"title\": \"object\", \"type\": \"object\", \"description\": \"object\", \"release_year\": \"int64\", \"age_certification\": \"object\", \"runtime\": \"int64\", \"genres\": \"object\", \"production_countries\": \"object\", \"seasons\": \"float64\", \"imdb_id\": \"object\", \"imdb_score\": \"float64\", \"imdb_votes\": \"float64\", \"tmdb_popularity\": \"float64\", \"tmdb_score\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 5850 entries, 0 to 5849\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 5850 non-null object \n 1 title 5849 non-null object \n 2 type 5850 non-null object \n 3 description 5832 non-null object \n 4 release_year 5850 non-null int64 \n 5 age_certification 3231 non-null object \n 6 runtime 5850 non-null int64 \n 7 genres 5850 non-null object \n 8 production_countries 5850 non-null object \n 9 seasons 2106 non-null float64\n 10 imdb_id 5447 non-null object \n 11 imdb_score 5368 non-null float64\n 12 imdb_votes 5352 non-null float64\n 13 tmdb_popularity 5759 non-null float64\n 14 tmdb_score 5539 non-null float64\ndtypes: float64(5), int64(2), object(8)\nmemory usage: 685.7+ KB\n", "summary": "{\"release_year\": {\"count\": 5850.0, \"mean\": 2016.417094017094, \"std\": 6.937725712183742, \"min\": 1945.0, \"25%\": 2016.0, \"50%\": 2018.0, \"75%\": 2020.0, \"max\": 2022.0}, \"runtime\": {\"count\": 5850.0, \"mean\": 76.88888888888889, \"std\": 39.00250917525395, \"min\": 0.0, \"25%\": 44.0, \"50%\": 83.0, \"75%\": 104.0, \"max\": 240.0}, \"seasons\": {\"count\": 2106.0, \"mean\": 2.1628679962013297, \"std\": 2.6890413904714925, \"min\": 1.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 42.0}, \"imdb_score\": {\"count\": 5368.0, \"mean\": 6.510860655737705, \"std\": 1.1638263082409555, \"min\": 1.5, \"25%\": 5.8, \"50%\": 6.6, \"75%\": 7.3, \"max\": 9.6}, \"imdb_votes\": {\"count\": 5352.0, \"mean\": 23439.382473841553, \"std\": 95820.47090889506, \"min\": 5.0, \"25%\": 516.75, \"50%\": 2233.5, \"75%\": 9494.0, \"max\": 2294231.0}, \"tmdb_popularity\": {\"count\": 5759.0, \"mean\": 22.6379253956843, \"std\": 81.6802632085619, \"min\": 0.0094417458789051, \"25%\": 2.7285, \"50%\": 6.821, \"75%\": 16.59, \"max\": 2274.044}, \"tmdb_score\": {\"count\": 5539.0, \"mean\": 6.829174760787145, \"std\": 1.1703914445224128, \"min\": 0.5, \"25%\": 6.1, \"50%\": 6.9, \"75%\": 7.5375, \"max\": 10.0}}", "examples": "{\"id\":{\"0\":\"ts300399\",\"1\":\"tm84618\",\"2\":\"tm154986\",\"3\":\"tm127384\"},\"title\":{\"0\":\"Five Came Back: The Reference Films\",\"1\":\"Taxi Driver\",\"2\":\"Deliverance\",\"3\":\"Monty Python and the Holy Grail\"},\"type\":{\"0\":\"SHOW\",\"1\":\"MOVIE\",\"2\":\"MOVIE\",\"3\":\"MOVIE\"},\"description\":{\"0\":\"This collection includes 12 World War II-era propaganda films \\u2014 many of which are graphic and offensive \\u2014 discussed in the docuseries \\\"Five Came Back.\\\"\",\"1\":\"A mentally unstable Vietnam War veteran works as a night-time taxi driver in New York City where the perceived decadence and sleaze feed his urge for violent action.\",\"2\":\"Intent on seeing the Cahulawassee River before it's turned into one huge 
lake, outdoor fanatic Lewis Medlock takes his friends on a river-rafting trip they'll never forget into the dangerous American back-country.\",\"3\":\"King Arthur, accompanied by his squire, recruits his Knights of the Round Table, including Sir Bedevere the Wise, Sir Lancelot the Brave, Sir Robin the Not-Quite-So-Brave-As-Sir-Lancelot and Sir Galahad the Pure. On the way, Arthur battles the Black Knight who, despite having had all his limbs chopped off, insists he can still fight. They reach Camelot, but Arthur decides not to enter, as \\\"it is a silly place\\\".\"},\"release_year\":{\"0\":1945,\"1\":1976,\"2\":1972,\"3\":1975},\"age_certification\":{\"0\":\"TV-MA\",\"1\":\"R\",\"2\":\"R\",\"3\":\"PG\"},\"runtime\":{\"0\":51,\"1\":114,\"2\":109,\"3\":91},\"genres\":{\"0\":\"['documentation']\",\"1\":\"['drama', 'crime']\",\"2\":\"['drama', 'action', 'thriller', 'european']\",\"3\":\"['fantasy', 'action', 'comedy']\"},\"production_countries\":{\"0\":\"['US']\",\"1\":\"['US']\",\"2\":\"['US']\",\"3\":\"['GB']\"},\"seasons\":{\"0\":1.0,\"1\":null,\"2\":null,\"3\":null},\"imdb_id\":{\"0\":null,\"1\":\"tt0075314\",\"2\":\"tt0068473\",\"3\":\"tt0071853\"},\"imdb_score\":{\"0\":null,\"1\":8.2,\"2\":7.7,\"3\":8.2},\"imdb_votes\":{\"0\":null,\"1\":808582.0,\"2\":107673.0,\"3\":534486.0},\"tmdb_popularity\":{\"0\":0.6,\"1\":40.965,\"2\":10.01,\"3\":15.461},\"tmdb_score\":{\"0\":null,\"1\":8.179,\"2\":7.3,\"3\":7.811}}"}}, {"netflix-tv-shows-and-movies/credits.csv": {"column_names": "[\"person_id\", \"id\", \"name\", \"character\", \"role\"]", "column_data_types": "{\"person_id\": \"int64\", \"id\": \"object\", \"name\": \"object\", \"character\": \"object\", \"role\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 77801 entries, 0 to 77800\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 person_id 77801 non-null int64 \n 1 id 77801 non-null object\n 2 name 77801 non-null object\n 3 character 68029 non-null object\n 4 role 77801 non-null object\ndtypes: int64(1), object(4)\nmemory usage: 3.0+ MB\n", "summary": "{\"person_id\": {\"count\": 77801.0, \"mean\": 529488.8064420766, \"std\": 643016.6699575292, \"min\": 7.0, \"25%\": 45306.0, \"50%\": 198358.0, \"75%\": 888096.0, \"max\": 2462818.0}}", "examples": "{\"person_id\":{\"0\":3748,\"1\":14658,\"2\":7064,\"3\":3739},\"id\":{\"0\":\"tm84618\",\"1\":\"tm84618\",\"2\":\"tm84618\",\"3\":\"tm84618\"},\"name\":{\"0\":\"Robert De Niro\",\"1\":\"Jodie Foster\",\"2\":\"Albert Brooks\",\"3\":\"Harvey Keitel\"},\"character\":{\"0\":\"Travis Bickle\",\"1\":\"Iris Steensma\",\"2\":\"Tom\",\"3\":\"Matthew 'Sport' Higgins\"},\"role\":{\"0\":\"ACTOR\",\"1\":\"ACTOR\",\"2\":\"ACTOR\",\"3\":\"ACTOR\"}}"}}]
true
2
<start_data_description><data_path>netflix-tv-shows-and-movies/titles.csv: <column_names> ['id', 'title', 'type', 'description', 'release_year', 'age_certification', 'runtime', 'genres', 'production_countries', 'seasons', 'imdb_id', 'imdb_score', 'imdb_votes', 'tmdb_popularity', 'tmdb_score'] <column_types> {'id': 'object', 'title': 'object', 'type': 'object', 'description': 'object', 'release_year': 'int64', 'age_certification': 'object', 'runtime': 'int64', 'genres': 'object', 'production_countries': 'object', 'seasons': 'float64', 'imdb_id': 'object', 'imdb_score': 'float64', 'imdb_votes': 'float64', 'tmdb_popularity': 'float64', 'tmdb_score': 'float64'} <dataframe_Summary> {'release_year': {'count': 5850.0, 'mean': 2016.417094017094, 'std': 6.937725712183742, 'min': 1945.0, '25%': 2016.0, '50%': 2018.0, '75%': 2020.0, 'max': 2022.0}, 'runtime': {'count': 5850.0, 'mean': 76.88888888888889, 'std': 39.00250917525395, 'min': 0.0, '25%': 44.0, '50%': 83.0, '75%': 104.0, 'max': 240.0}, 'seasons': {'count': 2106.0, 'mean': 2.1628679962013297, 'std': 2.6890413904714925, 'min': 1.0, '25%': 1.0, '50%': 1.0, '75%': 2.0, 'max': 42.0}, 'imdb_score': {'count': 5368.0, 'mean': 6.510860655737705, 'std': 1.1638263082409555, 'min': 1.5, '25%': 5.8, '50%': 6.6, '75%': 7.3, 'max': 9.6}, 'imdb_votes': {'count': 5352.0, 'mean': 23439.382473841553, 'std': 95820.47090889506, 'min': 5.0, '25%': 516.75, '50%': 2233.5, '75%': 9494.0, 'max': 2294231.0}, 'tmdb_popularity': {'count': 5759.0, 'mean': 22.6379253956843, 'std': 81.6802632085619, 'min': 0.0094417458789051, '25%': 2.7285, '50%': 6.821, '75%': 16.59, 'max': 2274.044}, 'tmdb_score': {'count': 5539.0, 'mean': 6.829174760787145, 'std': 1.1703914445224128, 'min': 0.5, '25%': 6.1, '50%': 6.9, '75%': 7.5375, 'max': 10.0}} <dataframe_info> RangeIndex: 5850 entries, 0 to 5849 Data columns (total 15 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 5850 non-null object 1 title 5849 non-null object 2 type 5850 non-null object 3 description 5832 non-null object 4 release_year 5850 non-null int64 5 age_certification 3231 non-null object 6 runtime 5850 non-null int64 7 genres 5850 non-null object 8 production_countries 5850 non-null object 9 seasons 2106 non-null float64 10 imdb_id 5447 non-null object 11 imdb_score 5368 non-null float64 12 imdb_votes 5352 non-null float64 13 tmdb_popularity 5759 non-null float64 14 tmdb_score 5539 non-null float64 dtypes: float64(5), int64(2), object(8) memory usage: 685.7+ KB <some_examples> {'id': {'0': 'ts300399', '1': 'tm84618', '2': 'tm154986', '3': 'tm127384'}, 'title': {'0': 'Five Came Back: The Reference Films', '1': 'Taxi Driver', '2': 'Deliverance', '3': 'Monty Python and the Holy Grail'}, 'type': {'0': 'SHOW', '1': 'MOVIE', '2': 'MOVIE', '3': 'MOVIE'}, 'description': {'0': 'This collection includes 12 World War II-era propaganda films — many of which are graphic and offensive — discussed in the docuseries "Five Came Back."', '1': 'A mentally unstable Vietnam War veteran works as a night-time taxi driver in New York City where the perceived decadence and sleaze feed his urge for violent action.', '2': "Intent on seeing the Cahulawassee River before it's turned into one huge lake, outdoor fanatic Lewis Medlock takes his friends on a river-rafting trip they'll never forget into the dangerous American back-country.", '3': 'King Arthur, accompanied by his squire, recruits his Knights of the Round Table, including Sir Bedevere the Wise, Sir Lancelot the Brave, Sir Robin the 
Not-Quite-So-Brave-As-Sir-Lancelot and Sir Galahad the Pure. On the way, Arthur battles the Black Knight who, despite having had all his limbs chopped off, insists he can still fight. They reach Camelot, but Arthur decides not to enter, as "it is a silly place".'}, 'release_year': {'0': 1945, '1': 1976, '2': 1972, '3': 1975}, 'age_certification': {'0': 'TV-MA', '1': 'R', '2': 'R', '3': 'PG'}, 'runtime': {'0': 51, '1': 114, '2': 109, '3': 91}, 'genres': {'0': "['documentation']", '1': "['drama', 'crime']", '2': "['drama', 'action', 'thriller', 'european']", '3': "['fantasy', 'action', 'comedy']"}, 'production_countries': {'0': "['US']", '1': "['US']", '2': "['US']", '3': "['GB']"}, 'seasons': {'0': 1.0, '1': None, '2': None, '3': None}, 'imdb_id': {'0': None, '1': 'tt0075314', '2': 'tt0068473', '3': 'tt0071853'}, 'imdb_score': {'0': None, '1': 8.2, '2': 7.7, '3': 8.2}, 'imdb_votes': {'0': None, '1': 808582.0, '2': 107673.0, '3': 534486.0}, 'tmdb_popularity': {'0': 0.6, '1': 40.965, '2': 10.01, '3': 15.461}, 'tmdb_score': {'0': None, '1': 8.179, '2': 7.3, '3': 7.811}} <end_description> <start_data_description><data_path>netflix-tv-shows-and-movies/credits.csv: <column_names> ['person_id', 'id', 'name', 'character', 'role'] <column_types> {'person_id': 'int64', 'id': 'object', 'name': 'object', 'character': 'object', 'role': 'object'} <dataframe_Summary> {'person_id': {'count': 77801.0, 'mean': 529488.8064420766, 'std': 643016.6699575292, 'min': 7.0, '25%': 45306.0, '50%': 198358.0, '75%': 888096.0, 'max': 2462818.0}} <dataframe_info> RangeIndex: 77801 entries, 0 to 77800 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 person_id 77801 non-null int64 1 id 77801 non-null object 2 name 77801 non-null object 3 character 68029 non-null object 4 role 77801 non-null object dtypes: int64(1), object(4) memory usage: 3.0+ MB <some_examples> {'person_id': {'0': 3748, '1': 14658, '2': 7064, '3': 3739}, 'id': {'0': 'tm84618', '1': 'tm84618', '2': 'tm84618', '3': 'tm84618'}, 'name': {'0': 'Robert De Niro', '1': 'Jodie Foster', '2': 'Albert Brooks', '3': 'Harvey Keitel'}, 'character': {'0': 'Travis Bickle', '1': 'Iris Steensma', '2': 'Tom', '3': "Matthew 'Sport' Higgins"}, 'role': {'0': 'ACTOR', '1': 'ACTOR', '2': 'ACTOR', '3': 'ACTOR'}} <end_description>
855
0
3,682
855
129313251
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd

# Read the CSV data file into a pandas DataFrame
df = pd.read_csv("/kaggle/input/fedfunds-vs-gdp/gdp_fedfunds.csv")

import numpy as np
import statsmodels.api as sm

# Add a constant to the DataFrame for the regression intercept
df["const"] = 1

# Define the regression model
model = sm.OLS(df["value_gdp"], df[["const", "value_fedfunds"]])

# Fit the model to the data
results = model.fit()

# Print the regression results
print(results.summary())

import matplotlib.pyplot as plt

plt.figure(figsize=(10, 5))
plt.scatter(df["value_fedfunds"], df["value_gdp"])
plt.xlabel("Federal Funds Rate")
plt.ylabel("GDP")
plt.title("Scatter plot of Federal Funds Rate vs GDP")
plt.grid(True)
plt.show()
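# A hedged sketch, not part of the original notebook: the same regression can
# be written with the statsmodels formula API, which adds the intercept
# automatically. It reuses the df loaded above with its 'value_gdp' and
# 'value_fedfunds' columns.
import statsmodels.formula.api as smf

results_formula = smf.ols("value_gdp ~ value_fedfunds", data=df).fit()
print(results_formula.params)    # intercept and slope
print(results_formula.rsquared)  # share of GDP variation explained by the rate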
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/313/129313251.ipynb
null
null
[{"Id": 129313251, "ScriptId": 38446625, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4454153, "CreationDate": "05/12/2023 17:12:37", "VersionNumber": 1.0, "Title": "Fed Funds vs. GDP", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 75.0, "LinesInsertedFromPrevious": 75.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd

# Read the CSV data file into a pandas DataFrame
df = pd.read_csv("/kaggle/input/fedfunds-vs-gdp/gdp_fedfunds.csv")

import numpy as np
import statsmodels.api as sm

# Add a constant to the DataFrame for the regression intercept
df["const"] = 1

# Define the regression model
model = sm.OLS(df["value_gdp"], df[["const", "value_fedfunds"]])

# Fit the model to the data
results = model.fit()

# Print the regression results
print(results.summary())

import matplotlib.pyplot as plt

plt.figure(figsize=(10, 5))
plt.scatter(df["value_fedfunds"], df["value_gdp"])
plt.xlabel("Federal Funds Rate")
plt.ylabel("GDP")
plt.title("Scatter plot of Federal Funds Rate vs GDP")
plt.grid(True)
plt.show()
false
0
414
0
414
414
129287752
import pandas as pd
import urllib.request
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import cosine_similarity

# The source URLs were missing in the original notebook; these are assumed to
# be the UCI Bag of Words vocabulary files, whose names match the local filenames.
url1 = "https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.enron.txt"
url2 = "https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.nips.txt"
url3 = "https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.kos.txt"

# Download the datasets
urllib.request.urlretrieve(url1, "vocab.enron.txt")
urllib.request.urlretrieve(url2, "vocab.nips.txt")
urllib.request.urlretrieve(url3, "vocab.kos.txt")

vocab = pd.read_csv("vocab.enron.txt", sep="\t", header=None, names=["word"])
nips = pd.read_csv("vocab.nips.txt", sep="\t", header=None, names=["word"])
kos = pd.read_csv("vocab.kos.txt", sep="\t", header=None, skiprows=3, names=["word"])

# Stack the three vocabularies into one corpus (the original element-wise
# str.cat concatenated words pairwise, which was not the intent)
corpus = pd.concat([vocab["word"], nips["word"], kos["word"]], ignore_index=True)
corpus = corpus.dropna()

count_vectorizer = CountVectorizer()
X = count_vectorizer.fit_transform(corpus)

svd = TruncatedSVD(n_components=100)
X_svd = svd.fit_transform(X)
print(X_svd[:, :10])

# ### i. Because the dimensions from the reduced SVD are linear combinations of the original features, they are not easy to interpret directly. They can, however, be used as input for machine learning models or further analysis.
# ### ii. The first dimension in the truncated SVD output represents the most significant feature or pattern in the data: it accounts for the largest single share of the data's variation.
# ### iii. The top 10 dimensions in the truncated SVD output represent the most notable patterns in the data, which together explain a significant portion of the variation. How you interpret these dimensions depends on the particular data and the problem you are trying to solve. To learn more about the underlying patterns, you can examine the words or documents that have high loadings on each dimension.

vocab = vocab.dropna()
nips = nips.dropna()
kos = kos.dropna()

vectorizer = CountVectorizer()
matrix1 = vectorizer.fit_transform(vocab["word"])
matrix2 = vectorizer.fit_transform(nips["word"])
matrix3 = vectorizer.fit_transform(kos["word"])

# normalize the matrices
norm_matrix1 = normalize(matrix1)
norm_matrix2 = normalize(matrix2)
norm_matrix3 = normalize(matrix3)

# calculate cosine similarity matrices
similarity_matrix1 = cosine_similarity(norm_matrix1)
similarity_matrix2 = cosine_similarity(norm_matrix2)
similarity_matrix3 = cosine_similarity(norm_matrix3)

# calculate the average cosine similarity within each corpus
avg_within_corpus1 = similarity_matrix1.mean()
avg_within_corpus2 = similarity_matrix2.mean()
avg_within_corpus3 = similarity_matrix3.mean()

print("Average cosine similarity within corpus 1:", avg_within_corpus1)
print("Average cosine similarity within corpus 2:", avg_within_corpus2)
print("Average cosine similarity within corpus 3:", avg_within_corpus3)

# ### e. How well LSA works as a method for clustering corpora depends on the particular data and the problem you are trying to address. LSA reduces the dimensionality of the word-document matrix, which can help group similar texts together. Other clustering methods may perform better for specific kinds of data, so LSA is not guaranteed to be effective for all sorts of text data.
# ### f. PCA is a linear dimensionality reduction technique that can also reduce the dimensionality of the word-document matrix, so it can be used instead of LSA for clustering text data. However, since PCA is not designed specifically for text data, it may not perform as well as LSA here. Even though PCA's results may not match LSA's, they can still be helpful for grouping corpora together: PCA produces a low-dimensional representation of the word-document matrix that captures the most important information. PCA may not capture the same structure as LSA, though, and may not work as well with text data.
# ### In conclusion, LSA and PCA are both useful techniques for clustering corpora, but how well they work depends on the particular data and the problem at hand. To find the best strategy for your data, it may be necessary to try several methods and algorithms.

# ### Second Task
# ### I have used the wine data
import time
from sklearn.datasets import load_wine
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

wine = load_wine()
X = wine.data
y = wine.target
y

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

num_cols = X.shape[1]
pca_num = [int(num_cols / 10 * i) for i in range(1, 11)]
pca_num.append(num_cols)
pca_num = sorted(set(pca_num))  # drop the duplicate full-width entry

for n in pca_num:
    pca = PCA(n_components=n)
    X_train_pca = pca.fit_transform(X_train)
    X_test_pca = pca.transform(X_test)
    print(f"PCA with {n} components")

    # Use Logistic Regression Classifier
    print("Logistic Regression Classifier:")
    start_time = time.time()
    lr = LogisticRegression(random_state=0, multi_class="ovr")
    lr.fit(X_train_pca, y_train)
    y_pred_lr = lr.predict(X_test_pca)
    acc_lr = accuracy_score(y_test, y_pred_lr)
    print(f"Accuracy: {acc_lr:.4f}")
    print(f"Time: {time.time()-start_time:.4f}s")

    # Use Decision Tree Classifier
    print("\nDecision Tree Classifier:")
    start_time = time.time()
    dtc = DecisionTreeClassifier(random_state=0)
    dtc.fit(X_train_pca, y_train)
    y_pred_dtc = dtc.predict(X_test_pca)
    acc_dtc = accuracy_score(y_test, y_pred_dtc)
    print(f"Accuracy: {acc_dtc:.4f}")
    print(f"Time: {time.time()-start_time:.4f}s")

    # Use K-Nearest Neighbors Classifier
    print("\nK-Nearest Neighbors Classifier:")
    start_time = time.time()
    knn = KNeighborsClassifier(n_neighbors=5)
    knn.fit(X_train_pca, y_train)
    y_pred_knn = knn.predict(X_test_pca)
    acc_knn = accuracy_score(y_test, y_pred_knn)
    print(f"Accuracy: {acc_knn:.4f}")
    print(f"Time: {time.time()-start_time:.4f}s")
    print("-" * 50)
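# A hedged sketch, not part of the original homework: instead of sweeping a
# fixed grid of component counts, PCA accepts a float n_components and keeps
# the smallest number of components that preserves that share of variance.
# It reuses the scaled X_train from the wine cells above.
from sklearn.decomposition import PCA

pca_95 = PCA(n_components=0.95)  # keep enough components for 95% of the variance
X_train_95 = pca_95.fit_transform(X_train)
print(
    f"{pca_95.n_components_} components retain "
    f"{pca_95.explained_variance_ratio_.sum():.2%} of the variance"
)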
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/287/129287752.ipynb
null
null
[{"Id": 129287752, "ScriptId": 38438853, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14388792, "CreationDate": "05/12/2023 13:18:06", "VersionNumber": 1.0, "Title": "Homework_2", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 152.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import pandas as pd
import urllib.request
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import cosine_similarity

# The source URLs were missing in the original notebook; these are assumed to
# be the UCI Bag of Words vocabulary files, whose names match the local filenames.
url1 = "https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.enron.txt"
url2 = "https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.nips.txt"
url3 = "https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.kos.txt"

# Download the datasets
urllib.request.urlretrieve(url1, "vocab.enron.txt")
urllib.request.urlretrieve(url2, "vocab.nips.txt")
urllib.request.urlretrieve(url3, "vocab.kos.txt")

vocab = pd.read_csv("vocab.enron.txt", sep="\t", header=None, names=["word"])
nips = pd.read_csv("vocab.nips.txt", sep="\t", header=None, names=["word"])
kos = pd.read_csv("vocab.kos.txt", sep="\t", header=None, skiprows=3, names=["word"])

# Stack the three vocabularies into one corpus (the original element-wise
# str.cat concatenated words pairwise, which was not the intent)
corpus = pd.concat([vocab["word"], nips["word"], kos["word"]], ignore_index=True)
corpus = corpus.dropna()

count_vectorizer = CountVectorizer()
X = count_vectorizer.fit_transform(corpus)

svd = TruncatedSVD(n_components=100)
X_svd = svd.fit_transform(X)
print(X_svd[:, :10])

# ### i. Because the dimensions from the reduced SVD are linear combinations of the original features, they are not easy to interpret directly. They can, however, be used as input for machine learning models or further analysis.
# ### ii. The first dimension in the truncated SVD output represents the most significant feature or pattern in the data: it accounts for the largest single share of the data's variation.
# ### iii. The top 10 dimensions in the truncated SVD output represent the most notable patterns in the data, which together explain a significant portion of the variation. How you interpret these dimensions depends on the particular data and the problem you are trying to solve. To learn more about the underlying patterns, you can examine the words or documents that have high loadings on each dimension.

vocab = vocab.dropna()
nips = nips.dropna()
kos = kos.dropna()

vectorizer = CountVectorizer()
matrix1 = vectorizer.fit_transform(vocab["word"])
matrix2 = vectorizer.fit_transform(nips["word"])
matrix3 = vectorizer.fit_transform(kos["word"])

# normalize the matrices
norm_matrix1 = normalize(matrix1)
norm_matrix2 = normalize(matrix2)
norm_matrix3 = normalize(matrix3)

# calculate cosine similarity matrices
similarity_matrix1 = cosine_similarity(norm_matrix1)
similarity_matrix2 = cosine_similarity(norm_matrix2)
similarity_matrix3 = cosine_similarity(norm_matrix3)

# calculate the average cosine similarity within each corpus
avg_within_corpus1 = similarity_matrix1.mean()
avg_within_corpus2 = similarity_matrix2.mean()
avg_within_corpus3 = similarity_matrix3.mean()

print("Average cosine similarity within corpus 1:", avg_within_corpus1)
print("Average cosine similarity within corpus 2:", avg_within_corpus2)
print("Average cosine similarity within corpus 3:", avg_within_corpus3)

# ### e. How well LSA works as a method for clustering corpora depends on the particular data and the problem you are trying to address. LSA reduces the dimensionality of the word-document matrix, which can help group similar texts together. Other clustering methods may perform better for specific kinds of data, so LSA is not guaranteed to be effective for all sorts of text data.
# ### f. PCA is a linear dimensionality reduction technique that can also reduce the dimensionality of the word-document matrix, so it can be used instead of LSA for clustering text data. However, since PCA is not designed specifically for text data, it may not perform as well as LSA here. Even though PCA's results may not match LSA's, they can still be helpful for grouping corpora together: PCA produces a low-dimensional representation of the word-document matrix that captures the most important information. PCA may not capture the same structure as LSA, though, and may not work as well with text data.
# ### In conclusion, LSA and PCA are both useful techniques for clustering corpora, but how well they work depends on the particular data and the problem at hand. To find the best strategy for your data, it may be necessary to try several methods and algorithms.

# ### Second Task
# ### I have used the wine data
import time
from sklearn.datasets import load_wine
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

wine = load_wine()
X = wine.data
y = wine.target
y

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

num_cols = X.shape[1]
pca_num = [int(num_cols / 10 * i) for i in range(1, 11)]
pca_num.append(num_cols)
pca_num = sorted(set(pca_num))  # drop the duplicate full-width entry

for n in pca_num:
    pca = PCA(n_components=n)
    X_train_pca = pca.fit_transform(X_train)
    X_test_pca = pca.transform(X_test)
    print(f"PCA with {n} components")

    # Use Logistic Regression Classifier
    print("Logistic Regression Classifier:")
    start_time = time.time()
    lr = LogisticRegression(random_state=0, multi_class="ovr")
    lr.fit(X_train_pca, y_train)
    y_pred_lr = lr.predict(X_test_pca)
    acc_lr = accuracy_score(y_test, y_pred_lr)
    print(f"Accuracy: {acc_lr:.4f}")
    print(f"Time: {time.time()-start_time:.4f}s")

    # Use Decision Tree Classifier
    print("\nDecision Tree Classifier:")
    start_time = time.time()
    dtc = DecisionTreeClassifier(random_state=0)
    dtc.fit(X_train_pca, y_train)
    y_pred_dtc = dtc.predict(X_test_pca)
    acc_dtc = accuracy_score(y_test, y_pred_dtc)
    print(f"Accuracy: {acc_dtc:.4f}")
    print(f"Time: {time.time()-start_time:.4f}s")

    # Use K-Nearest Neighbors Classifier
    print("\nK-Nearest Neighbors Classifier:")
    start_time = time.time()
    knn = KNeighborsClassifier(n_neighbors=5)
    knn.fit(X_train_pca, y_train)
    y_pred_knn = knn.predict(X_test_pca)
    acc_knn = accuracy_score(y_test, y_pred_knn)
    print(f"Accuracy: {acc_knn:.4f}")
    print(f"Time: {time.time()-start_time:.4f}s")
    print("-" * 50)
false
0
1,783
0
1,783
1,783
129287504
# # Import and become one with the data
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import urllib.request
import zipfile

# URL of the file to download
url = "https://storage.googleapis.com/ztm_tf_course/food_vision/10_food_classes_all_data.zip"

# Download the file
urllib.request.urlretrieve(url, "10_food_classes_all_data.zip")

# Extract the zip file
with zipfile.ZipFile("10_food_classes_all_data.zip", "r") as zip_ref:
    zip_ref.extractall()

import os

for dirpath, dirnames, filenames in os.walk("10_food_classes_all_data"):
    print(
        f"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'."
    )

data_dir = "10_food_classes_all_data/train"
class_names = sorted(os.listdir(data_dir))
class_names

train_dir = "10_food_classes_all_data/train"
test_dir = "10_food_classes_all_data/test"

# # Preprocess the data
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1 / 255.0)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)

train_data = train_datagen.flow_from_directory(
    directory=train_dir, target_size=(224, 224), batch_size=32, class_mode="categorical"
)
test_data = test_datagen.flow_from_directory(
    directory=test_dir, target_size=(224, 224), batch_size=32, class_mode="categorical"
)

# # Create a model
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D
from tensorflow.keras import Sequential

model_1 = Sequential(
    [
        Conv2D(filters=10, kernel_size=3, activation="relu", input_shape=(224, 224, 3)),
        Conv2D(10, 3, activation="relu"),
        MaxPool2D(),
        Conv2D(10, 3, activation="relu"),
        Conv2D(10, 3, activation="relu"),
        MaxPool2D(),
        Flatten(),
        Dense(10, activation="softmax"),
    ]
)
model_1.compile(
    loss="categorical_crossentropy",
    optimizer=tf.keras.optimizers.Adam(),
    metrics=["accuracy"],
)
history_1 = model_1.fit(
    train_data,
    epochs=5,
    steps_per_epoch=len(train_data),
    validation_data=test_data,
    validation_steps=len(test_data),
)

pd.DataFrame(history_1.history).plot(figsize=(12, 7))


def plot_loss_curves(history):
    # Plot loss
    train_loss = history.history["loss"]
    test_loss = history.history["val_loss"]
    epochs = range(len(history.history["loss"]))
    plt.plot(epochs, train_loss, label="Train-Loss")
    plt.plot(epochs, test_loss, label="Test-Loss")
    plt.title("Loss")
    plt.xlabel("Epochs")
    plt.legend()

    # Plot accuracy
    plt.figure()
    train_accuracy = history.history["accuracy"]
    test_accuracy = history.history["val_accuracy"]
    plt.plot(epochs, train_accuracy, label="Train Accuracy")
    plt.plot(epochs, test_accuracy, label="Test Accuracy")
    plt.title("Accuracy")
    plt.xlabel("Epochs")
    plt.legend()


plot_loss_curves(history_1)

model_1.summary()

# # Evaluate the model
model_1.evaluate(test_data)

# # Create a new model
# ## Our model is overfitting; we need to prevent this
train_datagen_augmented = ImageDataGenerator(
    rescale=1 / 255,
    rotation_range=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)
train_data_augmented = train_datagen_augmented.flow_from_directory(
    directory=train_dir, target_size=(224, 224), batch_size=32, class_mode="categorical"
)

model_2 = tf.keras.models.clone_model(model_1)
model_2.compile(
    loss="categorical_crossentropy",
    optimizer=tf.keras.optimizers.Adam(),
    metrics=["accuracy"],
)
history_2 = model_2.fit(
    train_data_augmented,
    epochs=5,
    steps_per_epoch=len(train_data_augmented),
    validation_data=test_data,
    validation_steps=len(test_data),
)

plot_loss_curves(history_2)

import urllib.request
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

url = "https://raw.githubusercontent.com/mrdbourke/tensorflow-deep-learning/main/images/03-steak.jpeg"
urllib.request.urlretrieve(url, "03-steak.jpeg")

steak = mpimg.imread("03-steak.jpeg")
plt.imshow(steak)
plt.axis(False)
plt.show()

steak.shape


def load_and_pred_image(filename, img_shape=224):
    img = tf.io.read_file(filename)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.resize(img, size=[img_shape, img_shape])
    img = img / 255.0
    return img


steak = load_and_pred_image("03-steak.jpeg")
steak

# # Make a prediction
pred = model_2.predict(tf.expand_dims(steak, axis=0))
pred

pred_class = class_names[pred.argmax()]
pred_class


def pred_and_plot_model(model, filename, class_names):
    img = load_and_pred_image(filename)
    pred = model.predict(tf.expand_dims(img, axis=0))
    pred_class = class_names[pred.argmax()]
    plt.imshow(img)
    plt.title(f"Predict: {pred_class}")
    plt.show()


pred_and_plot_model(model_2, "03-steak.jpeg", class_names)
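# A hedged sketch, not part of the original notebook: besides augmentation, a
# keras EarlyStopping callback halts training once validation loss stops
# improving, which is another common way to limit overfitting. It reuses
# model_2 and the generators defined above; the epoch count is arbitrary.
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=2, restore_best_weights=True
)
history_2b = model_2.fit(
    train_data_augmented,
    epochs=20,
    validation_data=test_data,
    callbacks=[early_stop],
)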
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/287/129287504.ipynb
null
null
[{"Id": 129287504, "ScriptId": 38408564, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13923706, "CreationDate": "05/12/2023 13:15:54", "VersionNumber": 1.0, "Title": "Computer vision model with 10 different classes", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 170.0, "LinesInsertedFromPrevious": 170.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
# # Import and become one with the data
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import urllib.request
import zipfile

# URL of the file to download
url = "https://storage.googleapis.com/ztm_tf_course/food_vision/10_food_classes_all_data.zip"

# Download the file
urllib.request.urlretrieve(url, "10_food_classes_all_data.zip")

# Extract the zip file
with zipfile.ZipFile("10_food_classes_all_data.zip", "r") as zip_ref:
    zip_ref.extractall()

import os

for dirpath, dirnames, filenames in os.walk("10_food_classes_all_data"):
    print(
        f"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'."
    )

data_dir = "10_food_classes_all_data/train"
class_names = sorted(os.listdir(data_dir))
class_names

train_dir = "10_food_classes_all_data/train"
test_dir = "10_food_classes_all_data/test"

# # Preprocess the data
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1 / 255.0)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)

train_data = train_datagen.flow_from_directory(
    directory=train_dir, target_size=(224, 224), batch_size=32, class_mode="categorical"
)
test_data = test_datagen.flow_from_directory(
    directory=test_dir, target_size=(224, 224), batch_size=32, class_mode="categorical"
)

# # Create a model
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D
from tensorflow.keras import Sequential

model_1 = Sequential(
    [
        Conv2D(filters=10, kernel_size=3, activation="relu", input_shape=(224, 224, 3)),
        Conv2D(10, 3, activation="relu"),
        MaxPool2D(),
        Conv2D(10, 3, activation="relu"),
        Conv2D(10, 3, activation="relu"),
        MaxPool2D(),
        Flatten(),
        Dense(10, activation="softmax"),
    ]
)
model_1.compile(
    loss="categorical_crossentropy",
    optimizer=tf.keras.optimizers.Adam(),
    metrics=["accuracy"],
)
history_1 = model_1.fit(
    train_data,
    epochs=5,
    steps_per_epoch=len(train_data),
    validation_data=test_data,
    validation_steps=len(test_data),
)

pd.DataFrame(history_1.history).plot(figsize=(12, 7))


def plot_loss_curves(history):
    # Plot loss
    train_loss = history.history["loss"]
    test_loss = history.history["val_loss"]
    epochs = range(len(history.history["loss"]))
    plt.plot(epochs, train_loss, label="Train-Loss")
    plt.plot(epochs, test_loss, label="Test-Loss")
    plt.title("Loss")
    plt.xlabel("Epochs")
    plt.legend()

    # Plot accuracy
    plt.figure()
    train_accuracy = history.history["accuracy"]
    test_accuracy = history.history["val_accuracy"]
    plt.plot(epochs, train_accuracy, label="Train Accuracy")
    plt.plot(epochs, test_accuracy, label="Test Accuracy")
    plt.title("Accuracy")
    plt.xlabel("Epochs")
    plt.legend()


plot_loss_curves(history_1)

model_1.summary()

# # Evaluate the model
model_1.evaluate(test_data)

# # Create a new model
# ## Our model is overfitting; we need to prevent this
train_datagen_augmented = ImageDataGenerator(
    rescale=1 / 255,
    rotation_range=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)
train_data_augmented = train_datagen_augmented.flow_from_directory(
    directory=train_dir, target_size=(224, 224), batch_size=32, class_mode="categorical"
)

model_2 = tf.keras.models.clone_model(model_1)
model_2.compile(
    loss="categorical_crossentropy",
    optimizer=tf.keras.optimizers.Adam(),
    metrics=["accuracy"],
)
history_2 = model_2.fit(
    train_data_augmented,
    epochs=5,
    steps_per_epoch=len(train_data_augmented),
    validation_data=test_data,
    validation_steps=len(test_data),
)

plot_loss_curves(history_2)

import urllib.request
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

url = "https://raw.githubusercontent.com/mrdbourke/tensorflow-deep-learning/main/images/03-steak.jpeg"
urllib.request.urlretrieve(url, "03-steak.jpeg")

steak = mpimg.imread("03-steak.jpeg")
plt.imshow(steak)
plt.axis(False)
plt.show()

steak.shape


def load_and_pred_image(filename, img_shape=224):
    img = tf.io.read_file(filename)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.resize(img, size=[img_shape, img_shape])
    img = img / 255.0
    return img


steak = load_and_pred_image("03-steak.jpeg")
steak

# # Make a prediction
pred = model_2.predict(tf.expand_dims(steak, axis=0))
pred

pred_class = class_names[pred.argmax()]
pred_class


def pred_and_plot_model(model, filename, class_names):
    img = load_and_pred_image(filename)
    pred = model.predict(tf.expand_dims(img, axis=0))
    pred_class = class_names[pred.argmax()]
    plt.imshow(img)
    plt.title(f"Predict: {pred_class}")
    plt.show()


pred_and_plot_model(model_2, "03-steak.jpeg", class_names)
false
0
1,665
1
1,665
1,665
129659332
<jupyter_start><jupyter_text>Indian Food 101
### Content
Indian cuisine consists of a variety of regional and traditional cuisines native to the Indian subcontinent. Given the diversity in soil, climate, culture, ethnic groups, and occupations, these cuisines vary substantially and use locally available spices, herbs, vegetables, and fruits. Indian food is also heavily influenced by religion, in particular Hinduism, cultural choices and traditions.
This dataset consists of information about various **Indian dishes**, their **ingredients**, their **place of origin**, etc.
### Column Description
**name** : name of the dish
**ingredients** : main ingredients used
**diet** : type of diet - either vegetarian or non vegetarian
**prep_time** : preparation time
**cook_time** : cooking time
**flavor_profile** : flavor profile includes whether the dish is spicy, sweet, bitter, etc
**course** : course of meal - starter, main course, dessert, etc
**state** : state where the dish is famous or originated
**region** : region where the state belongs
Presence of -1 in any of the columns indicates a NaN value.
Kaggle dataset identifier: indian-food-101
<jupyter_code>import pandas as pd

df = pd.read_csv('indian-food-101/indian_food.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 255 entries, 0 to 254
Data columns (total 9 columns):
 #   Column          Non-Null Count  Dtype
---  ------          --------------  -----
 0   name            255 non-null    object
 1   ingredients     255 non-null    object
 2   diet            255 non-null    object
 3   prep_time       255 non-null    int64
 4   cook_time       255 non-null    int64
 5   flavor_profile  255 non-null    object
 6   course          255 non-null    object
 7   state           255 non-null    object
 8   region          254 non-null    object
dtypes: int64(2), object(7)
memory usage: 18.1+ KB
<jupyter_text>Examples:
{
    "name": "Balu shahi",
    "ingredients": "Maida flour, yogurt, oil, sugar",
    "diet": "vegetarian",
    "prep_time": 45,
    "cook_time": 25,
    "flavor_profile": "sweet",
    "course": "dessert",
    "state": "West Bengal",
    "region": "East"
}
{
    "name": "Boondi",
    "ingredients": "Gram flour, ghee, sugar",
    "diet": "vegetarian",
    "prep_time": 80,
    "cook_time": 30,
    "flavor_profile": "sweet",
    "course": "dessert",
    "state": "Rajasthan",
    "region": "West"
}
{
    "name": "Gajar ka halwa",
    "ingredients": "Carrots, milk, sugar, ghee, cashews, raisins",
    "diet": "vegetarian",
    "prep_time": 15,
    "cook_time": 60,
    "flavor_profile": "sweet",
    "course": "dessert",
    "state": "Punjab",
    "region": "North"
}
{
    "name": "Ghevar",
    "ingredients": "Flour, ghee, kewra, milk, clarified butter, sugar, almonds, pistachio, saffron, green cardamom",
    "diet": "vegetarian",
    "prep_time": 15,
    "cook_time": 30,
    "flavor_profile": "sweet",
    "course": "dessert",
    "state": "Rajasthan",
    "region": "West"
}
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("/kaggle/input/indian-food-101/indian_food.csv")
data

# Unique values for each column
# print(pd.unique(data.ingredients))
print(pd.unique(data.flavor_profile))
print(pd.unique(data.course))
# print(pd.unique(data.diet))
# print(pd.unique(data.state))
### data.loc[data['state'] == '-1']

# Cleaning data
# data.loc[data['flavor_profile']== '-1']
# data.loc[data['name']=='Copra paak','flavor_profile']
data.loc[
    data["name"].isin(["Copra paak", "Puttu", "Kansar"]), "flavor_profile"
] = "sweet"
data.loc[data["flavor_profile"] == "-1", "flavor_profile"] = "other"

# Cleaned values
data.tail(10)

# Performing one-hot encoding
discrete_df = pd.get_dummies(data, columns=["ingredients", "diet", "flavor_profile"])
discrete_df

target = data["course"]
df = discrete_df.drop(columns=["course", "state", "region", "name"])
df

# # **Label Encoding**
# Converting the target strings to numeric labels
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
label = le.fit_transform(data["course"])
label
# LabelEncoder assigns labels alphabetically:
# 0='dessert' 1='main course' 2='snack' 3='starter'

# # **Train Test Split**
# Splitting data into training and testing sets
from sklearn.model_selection import train_test_split

X_train, X_test, Y_train, Y_test = train_test_split(
    df, label, test_size=0.2, random_state=0
)
# len(X_test)

# # **Decision Tree Algorithm**
# Implementing the classification algorithm
from sklearn.tree import DecisionTreeClassifier

model1 = DecisionTreeClassifier()
model1.fit(X_train, Y_train)
# 0='dessert' 1='main course' 2='snack' 3='starter'
ypred = model1.predict(X_test)
ypredct = le.inverse_transform(ypred)
# print(ypredct)

# Checking accuracy
accuracy1 = model1.score(X_test, Y_test)
print(accuracy1)

from sklearn.metrics import classification_report

report1 = classification_report(Y_test, ypred)
print(report1)
# report=classification_report(Y_test, ypred,output_dict=True)
# macro_precision = report['macro avg']['precision']
# macro_precision

from sklearn import metrics

confusion_matrix = metrics.confusion_matrix(Y_test, ypred)
cm_display = metrics.ConfusionMatrixDisplay(
    confusion_matrix=confusion_matrix, display_labels=le.classes_
)

import matplotlib.pyplot as plt

cm_display.plot()
plt.show()

# # **Random Forest Algorithm**
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

clf = RandomForestClassifier(n_estimators=2, min_samples_split=3, min_samples_leaf=2)
clf.fit(X_train, Y_train)
pred_clf = clf.predict(X_test)
accuracyN = clf.score(X_test, Y_test)
print(accuracyN)

reportN = classification_report(Y_test, pred_clf)
print(reportN)

from sklearn.model_selection import GridSearchCV

param_grid = {
    "n_estimators": [2, 5, 10, 20],
    "min_samples_split": [2, 3],
    "min_samples_leaf": [1, 2, 3],
}
grid_search = GridSearchCV(estimator=clf, param_grid=param_grid)
grid_search.fit(X_train, Y_train)
grid_search.best_params_

# # **Random Forest with Updated Parameters**
model2 = RandomForestClassifier(
    n_estimators=20, min_samples_split=2, min_samples_leaf=1
)
model2.fit(X_train, Y_train)
ypred2 = model2.predict(X_test)
accuracy2 = model2.score(X_test, Y_test)
print(accuracy2)
print(classification_report(Y_test, ypred2))

# # **Logistic Regression Algorithm**
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline

model3 = Pipeline([("minmax", MinMaxScaler()), ("lr", LogisticRegression())])
model3.fit(X_train, Y_train)
ypred3 = model3.predict(X_test)
accuracy3 = model3.score(X_test, Y_test)
print(accuracy3)
print(classification_report(Y_test, ypred3))

plt.figure(figsize=(5, 3))
plt.bar(
    ["Decision Tree", "Random Forest", "Logistic regression"],
    [accuracy1, accuracy2, accuracy3],
    width=0.4,
)
plt.title("Accuracy Comparison")
plt.ylabel("Accuracy")
plt.ylim(0.0, 1.0)
plt.show()
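# A hedged sketch, not part of the original notebook: pd.get_dummies above
# treats every full ingredient string as one category, so dishes that share
# ingredients get unrelated columns. Splitting the comma-separated strings
# and encoding them with MultiLabelBinarizer yields one column per individual
# ingredient instead. It reuses the `data` frame loaded above.
from sklearn.preprocessing import MultiLabelBinarizer

ingredient_lists = data["ingredients"].str.lower().str.split(", ")
mlb = MultiLabelBinarizer()
ingredient_features = pd.DataFrame(
    mlb.fit_transform(ingredient_lists), columns=mlb.classes_, index=data.index
)
print(ingredient_features.shape)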
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/659/129659332.ipynb
indian-food-101
nehaprabhavalkar
[{"Id": 129659332, "ScriptId": 35527800, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11420283, "CreationDate": "05/15/2023 14:38:22", "VersionNumber": 3.0, "Title": "DM_project", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 150.0, "LinesInsertedFromPrevious": 88.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 62.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185960981, "KernelVersionId": 129659332, "SourceDatasetVersionId": 1526436}]
[{"Id": 1526436, "DatasetId": 865197, "DatasourceVersionId": 1560856, "CreatorUserId": 3885917, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "09/30/2020 06:23:43", "VersionNumber": 2.0, "Title": "Indian Food 101", "Slug": "indian-food-101", "Subtitle": "Data about 255 traditional and famous dishes in India", "Description": "### Content\n\nIndian cuisine consists of a variety of regional and traditional cuisines native to the Indian subcontinent. Given the diversity in soil, climate, culture, ethnic groups, and occupations, these cuisines vary substantially and use locally available spices, herbs, vegetables, and fruits. Indian food is also heavily influenced by religion, in particular Hinduism, cultural choices and traditions.\n\nThis dataset consists of information about various **Indian dishes**, their **ingredients**, their **place of origin**, etc.\n\n### Column Description\n\n**name** : name of the dish\n\n**ingredients** : main ingredients used\n\n**diet** : type of diet - either vegetarian or non vegetarian\n\n**prep_time** : preparation time\n\n**cook_time** : cooking time\n\n**flavor_profile** : flavor profile includes whether the dish is spicy, sweet, bitter, etc\n\n**course** : course of meal - starter, main course, dessert, etc\n\n**state** : state where the dish is famous or is originated\n\n**region** : region where the state belongs\n\nPresence of -1 in any of the columns indicates NaN value.\n\n### Acknowledgements\n\nhttps://www.wikipedia.org/\nhttps://hebbarskitchen.com/\nhttps://www.archanaskitchen.com/\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F3885917%2Fa849f4fddf79a836d4ea0539286e3df9%2Fzxl2vnp_1457603881_725x725.jpg?generation=1603611234465596&alt=media)", "VersionNotes": "Version 2", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 865197, "CreatorUserId": 3885917, "OwnerUserId": 3885917.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1526436.0, "CurrentDatasourceVersionId": 1560856.0, "ForumId": 880553, "Type": 2, "CreationDate": "09/09/2020 07:36:01", "LastActivityDate": "09/09/2020", "TotalViews": 179626, "TotalDownloads": 22751, "TotalVotes": 529, "TotalKernels": 166}]
[{"Id": 3885917, "UserName": "nehaprabhavalkar", "DisplayName": "Neha Prabhavalkar", "RegisterDate": "10/19/2019", "PerformanceTier": 3}]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("/kaggle/input/indian-food-101/indian_food.csv")
data

# Unique values for each column
# print(pd.unique(data.ingredients))
print(pd.unique(data.flavor_profile))
print(pd.unique(data.course))
# print(pd.unique(data.diet))
# print(pd.unique(data.state))
### data.loc[data['state'] == '-1']

# Cleaning data
# data.loc[data['flavor_profile']== '-1']
# data.loc[data['name']=='Copra paak','flavor_profile']
data.loc[
    data["name"].isin(["Copra paak", "Puttu", "Kansar"]), "flavor_profile"
] = "sweet"
data.loc[data["flavor_profile"] == "-1", "flavor_profile"] = "other"

# Cleaned values
data.tail(10)

# Performing one-hot encoding
discrete_df = pd.get_dummies(data, columns=["ingredients", "diet", "flavor_profile"])
discrete_df

target = data["course"]
df = discrete_df.drop(columns=["course", "state", "region", "name"])
df

# # **Label Encoding**
# Converting the target strings to numeric labels
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
label = le.fit_transform(data["course"])
label
# LabelEncoder assigns labels alphabetically:
# 0='dessert' 1='main course' 2='snack' 3='starter'

# # **Train Test Split**
# Splitting data into training and testing sets
from sklearn.model_selection import train_test_split

X_train, X_test, Y_train, Y_test = train_test_split(
    df, label, test_size=0.2, random_state=0
)
# len(X_test)

# # **Decision Tree Algorithm**
# Implementing the classification algorithm
from sklearn.tree import DecisionTreeClassifier

model1 = DecisionTreeClassifier()
model1.fit(X_train, Y_train)
# 0='dessert' 1='main course' 2='snack' 3='starter'
ypred = model1.predict(X_test)
ypredct = le.inverse_transform(ypred)
# print(ypredct)

# Checking accuracy
accuracy1 = model1.score(X_test, Y_test)
print(accuracy1)

from sklearn.metrics import classification_report

report1 = classification_report(Y_test, ypred)
print(report1)
# report=classification_report(Y_test, ypred,output_dict=True)
# macro_precision = report['macro avg']['precision']
# macro_precision

from sklearn import metrics

confusion_matrix = metrics.confusion_matrix(Y_test, ypred)
cm_display = metrics.ConfusionMatrixDisplay(
    confusion_matrix=confusion_matrix, display_labels=le.classes_
)

import matplotlib.pyplot as plt

cm_display.plot()
plt.show()

# # **Random Forest Algorithm**
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

clf = RandomForestClassifier(n_estimators=2, min_samples_split=3, min_samples_leaf=2)
clf.fit(X_train, Y_train)
pred_clf = clf.predict(X_test)
accuracyN = clf.score(X_test, Y_test)
print(accuracyN)

reportN = classification_report(Y_test, pred_clf)
print(reportN)

from sklearn.model_selection import GridSearchCV

param_grid = {
    "n_estimators": [2, 5, 10, 20],
    "min_samples_split": [2, 3],
    "min_samples_leaf": [1, 2, 3],
}
grid_search = GridSearchCV(estimator=clf, param_grid=param_grid)
grid_search.fit(X_train, Y_train)
grid_search.best_params_

# # **Random Forest with Updated Parameters**
model2 = RandomForestClassifier(
    n_estimators=20, min_samples_split=2, min_samples_leaf=1
)
model2.fit(X_train, Y_train)
ypred2 = model2.predict(X_test)
accuracy2 = model2.score(X_test, Y_test)
print(accuracy2)
print(classification_report(Y_test, ypred2))

# # **Logistic Regression Algorithm**
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline

model3 = Pipeline([("minmax", MinMaxScaler()), ("lr", LogisticRegression())])
model3.fit(X_train, Y_train)
ypred3 = model3.predict(X_test)
accuracy3 = model3.score(X_test, Y_test)
print(accuracy3)
print(classification_report(Y_test, ypred3))

plt.figure(figsize=(5, 3))
plt.bar(
    ["Decision Tree", "Random Forest", "Logistic regression"],
    [accuracy1, accuracy2, accuracy3],
    width=0.4,
)
plt.title("Accuracy Comparison")
plt.ylabel("Accuracy")
plt.ylim(0.0, 1.0)
plt.show()
[{"indian-food-101/indian_food.csv": {"column_names": "[\"name\", \"ingredients\", \"diet\", \"prep_time\", \"cook_time\", \"flavor_profile\", \"course\", \"state\", \"region\"]", "column_data_types": "{\"name\": \"object\", \"ingredients\": \"object\", \"diet\": \"object\", \"prep_time\": \"int64\", \"cook_time\": \"int64\", \"flavor_profile\": \"object\", \"course\": \"object\", \"state\": \"object\", \"region\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 255 entries, 0 to 254\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 name 255 non-null object\n 1 ingredients 255 non-null object\n 2 diet 255 non-null object\n 3 prep_time 255 non-null int64 \n 4 cook_time 255 non-null int64 \n 5 flavor_profile 255 non-null object\n 6 course 255 non-null object\n 7 state 255 non-null object\n 8 region 254 non-null object\ndtypes: int64(2), object(7)\nmemory usage: 18.1+ KB\n", "summary": "{\"prep_time\": {\"count\": 255.0, \"mean\": 31.105882352941176, \"std\": 72.55440915682755, \"min\": -1.0, \"25%\": 10.0, \"50%\": 10.0, \"75%\": 20.0, \"max\": 500.0}, \"cook_time\": {\"count\": 255.0, \"mean\": 34.529411764705884, \"std\": 48.26564979817446, \"min\": -1.0, \"25%\": 20.0, \"50%\": 30.0, \"75%\": 40.0, \"max\": 720.0}}", "examples": "{\"name\":{\"0\":\"Balu shahi\",\"1\":\"Boondi\",\"2\":\"Gajar ka halwa\",\"3\":\"Ghevar\"},\"ingredients\":{\"0\":\"Maida flour, yogurt, oil, sugar\",\"1\":\"Gram flour, ghee, sugar\",\"2\":\"Carrots, milk, sugar, ghee, cashews, raisins\",\"3\":\"Flour, ghee, kewra, milk, clarified butter, sugar, almonds, pistachio, saffron, green cardamom\"},\"diet\":{\"0\":\"vegetarian\",\"1\":\"vegetarian\",\"2\":\"vegetarian\",\"3\":\"vegetarian\"},\"prep_time\":{\"0\":45,\"1\":80,\"2\":15,\"3\":15},\"cook_time\":{\"0\":25,\"1\":30,\"2\":60,\"3\":30},\"flavor_profile\":{\"0\":\"sweet\",\"1\":\"sweet\",\"2\":\"sweet\",\"3\":\"sweet\"},\"course\":{\"0\":\"dessert\",\"1\":\"dessert\",\"2\":\"dessert\",\"3\":\"dessert\"},\"state\":{\"0\":\"West Bengal\",\"1\":\"Rajasthan\",\"2\":\"Punjab\",\"3\":\"Rajasthan\"},\"region\":{\"0\":\"East\",\"1\":\"West\",\"2\":\"North\",\"3\":\"West\"}}"}}]
true
1
<start_data_description><data_path>indian-food-101/indian_food.csv: <column_names> ['name', 'ingredients', 'diet', 'prep_time', 'cook_time', 'flavor_profile', 'course', 'state', 'region'] <column_types> {'name': 'object', 'ingredients': 'object', 'diet': 'object', 'prep_time': 'int64', 'cook_time': 'int64', 'flavor_profile': 'object', 'course': 'object', 'state': 'object', 'region': 'object'} <dataframe_Summary> {'prep_time': {'count': 255.0, 'mean': 31.105882352941176, 'std': 72.55440915682755, 'min': -1.0, '25%': 10.0, '50%': 10.0, '75%': 20.0, 'max': 500.0}, 'cook_time': {'count': 255.0, 'mean': 34.529411764705884, 'std': 48.26564979817446, 'min': -1.0, '25%': 20.0, '50%': 30.0, '75%': 40.0, 'max': 720.0}} <dataframe_info> RangeIndex: 255 entries, 0 to 254 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 name 255 non-null object 1 ingredients 255 non-null object 2 diet 255 non-null object 3 prep_time 255 non-null int64 4 cook_time 255 non-null int64 5 flavor_profile 255 non-null object 6 course 255 non-null object 7 state 255 non-null object 8 region 254 non-null object dtypes: int64(2), object(7) memory usage: 18.1+ KB <some_examples> {'name': {'0': 'Balu shahi', '1': 'Boondi', '2': 'Gajar ka halwa', '3': 'Ghevar'}, 'ingredients': {'0': 'Maida flour, yogurt, oil, sugar', '1': 'Gram flour, ghee, sugar', '2': 'Carrots, milk, sugar, ghee, cashews, raisins', '3': 'Flour, ghee, kewra, milk, clarified butter, sugar, almonds, pistachio, saffron, green cardamom'}, 'diet': {'0': 'vegetarian', '1': 'vegetarian', '2': 'vegetarian', '3': 'vegetarian'}, 'prep_time': {'0': 45, '1': 80, '2': 15, '3': 15}, 'cook_time': {'0': 25, '1': 30, '2': 60, '3': 30}, 'flavor_profile': {'0': 'sweet', '1': 'sweet', '2': 'sweet', '3': 'sweet'}, 'course': {'0': 'dessert', '1': 'dessert', '2': 'dessert', '3': 'dessert'}, 'state': {'0': 'West Bengal', '1': 'Rajasthan', '2': 'Punjab', '3': 'Rajasthan'}, 'region': {'0': 'East', '1': 'West', '2': 'North', '3': 'West'}} <end_description>
1,500
0
2,481
1,500
129659135
# GeoJson file load
import json

JPgeo = json.load(open("/kaggle/input/jpgeojson/japan.geojson"))
JPgeo["features"][0]["properties"]
JPgeo["features"][0]["geometry"]

import folium

folium.Map(location=[35.6938, 139.7035], zoom_start=10)

JPmap = folium.Map(location=[35.6938, 139.7035], zoom_start=10, tiles="cartodbpositron")
folium.Choropleth(geo_data=JPgeo).add_to(JPmap)
JPmap
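The `Choropleth` above draws only the prefecture boundaries because no data is bound to it. A minimal sketch of binding values to the map, with a hypothetical DataFrame and an assumed `nam` join-key property (verify the real property name against the `properties` printed above):

import pandas as pd

# Hypothetical per-prefecture values; the join key in columns[0] must match
# the key_on property that actually exists in japan.geojson.
pref_values = pd.DataFrame({"pref": ["Tokyo To", "Osaka Fu"], "value": [100, 80]})

m = folium.Map(location=[35.6938, 139.7035], zoom_start=5, tiles="cartodbpositron")
folium.Choropleth(
    geo_data=JPgeo,
    data=pref_values,
    columns=["pref", "value"],        # [join key column, value column]
    key_on="feature.properties.nam",  # assumed property name; check the file
    fill_color="YlGnBu",
    legend_name="value",
).add_to(m)
m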
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/659/129659135.ipynb
null
null
[{"Id": 129659135, "ScriptId": 38510275, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14391720, "CreationDate": "05/15/2023 14:36:53", "VersionNumber": 1.0, "Title": "MapVisualizing", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 17.0, "LinesInsertedFromPrevious": 17.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
165
0
165
165
129723093
<jupyter_start><jupyter_text>Netflix Data: Cleaning, Analysis and Visualization
Netflix is a popular streaming service that offers a vast catalog of movies, TV shows, and original contents. This dataset is a cleaned version of the original version which can be found [here](https://www.kaggle.com/datasets/shivamb/netflix-shows). The data consists of contents added to Netflix from 2008 to 2021. The oldest content is as old as 1925 and the newest as 2021. This dataset will be cleaned with PostgreSQL and visualized with Tableau. The purpose of this dataset is to test my data cleaning and visualization skills. The cleaned data can be found below and the Tableau dashboard can be found [here](https://public.tableau.com/app/profile/abdulrasaq.ariyo/viz/NetflixTVShowsMovies_16615029026580/NetflixDashboard) .

## Data Cleaning
We are going to:
1. Treat the Nulls 
2. Treat the duplicates
3. Populate missing rows
4. Drop unneeded columns
5. Split columns

Extra steps and more explanation on the process will be explained through the code comments

```
--View dataset

SELECT * 
FROM netflix;
```

```
--The show_id column is the unique id for the dataset, therefore we are going to check for duplicates
 
SELECT show_id, COUNT(*) 
FROM netflix 
GROUP BY show_id 
ORDER BY show_id DESC;

--No duplicates
```

```
--Check null values across columns

SELECT COUNT(*) FILTER (WHERE show_id IS NULL) AS showid_nulls,
       COUNT(*) FILTER (WHERE type IS NULL) AS type_nulls,
       COUNT(*) FILTER (WHERE title IS NULL) AS title_nulls,
       COUNT(*) FILTER (WHERE director IS NULL) AS director_nulls,
       COUNT(*) FILTER (WHERE movie_cast IS NULL) AS movie_cast_nulls,
       COUNT(*) FILTER (WHERE country IS NULL) AS country_nulls,
       COUNT(*) FILTER (WHERE date_added IS NULL) AS date_addes_nulls,
       COUNT(*) FILTER (WHERE release_year IS NULL) AS release_year_nulls,
       COUNT(*) FILTER (WHERE rating IS NULL) AS rating_nulls,
       COUNT(*) FILTER (WHERE duration IS NULL) AS duration_nulls,
       COUNT(*) FILTER (WHERE listed_in IS NULL) AS listed_in_nulls,
       COUNT(*) FILTER (WHERE description IS NULL) AS description_nulls
FROM netflix;
```
```
We can see that there are NULLS. 
director_nulls = 2634
movie_cast_nulls = 825
country_nulls = 831
date_added_nulls = 10
rating_nulls = 4
duration_nulls = 3 
```

The director column's nulls are about 30% of the whole column, therefore I will not delete them. I will rather find another column to populate them. To populate the director column, we want to find out if there is a relationship between the movie_cast column and the director column

``` 
-- Below, we find out if some directors are likely to work with a particular cast

WITH cte AS
(
SELECT title, CONCAT(director, '---', movie_cast) AS director_cast 
FROM netflix
)

SELECT director_cast, COUNT(*) AS count
FROM cte
GROUP BY director_cast
HAVING COUNT(*) > 1
ORDER BY COUNT(*) DESC;

With this, we can now populate NULL rows in directors 
using their record with movie_cast 
```
```
UPDATE netflix 
SET director = 'Alastair Fothergill'
WHERE movie_cast = 'David Attenborough'
AND director IS NULL ;

--Repeat this step to populate the rest of the director nulls
--Populate the rest of the NULLs in director as "Not Given"

UPDATE netflix 
SET director = 'Not Given'
WHERE director IS NULL;

--When I was doing this, I found a less complex and faster way to populate a column which I will use next
```

Just like the director column, I will not delete the nulls in country.
Since the country column is related to director and movie, we are going to populate the country column with the director column

```
--Populate the country using the director column

SELECT COALESCE(nt.country,nt2.country) 
FROM netflix AS nt
JOIN netflix AS nt2 
ON nt.director = nt2.director 
AND nt.show_id <> nt2.show_id
WHERE nt.country IS NULL;

UPDATE netflix
SET country = nt2.country
FROM netflix AS nt2
WHERE netflix.director = nt2.director AND netflix.show_id <> nt2.show_id 
AND netflix.country IS NULL;

--To confirm if there are still directors linked to country that refuse to update

SELECT director, country, date_added
FROM netflix
WHERE country IS NULL;

--Populate the rest of the NULLs in country as "Not Given"

UPDATE netflix 
SET country = 'Not Given'
WHERE country IS NULL;
```

The date_added nulls are just 10 rows out of over 8000; deleting them will not affect our analysis or visualization

```
--Show date_added nulls

SELECT show_id, date_added
FROM netflix
WHERE date_added IS NULL;

--DELETE nulls

DELETE FROM netflix
WHERE show_id 
IN ('s6797', 's6067', 's6175', 's6807', 's6902', 's7255', 's7197', 's7407', 's7848', 's8183');
```

rating nulls is 4. Delete them
```
--Show rating NULLS

SELECT show_id, rating
FROM netflix
WHERE rating IS NULL;

--Delete the nulls, and show deleted fields
DELETE FROM netflix 
WHERE show_id 
IN (SELECT show_id FROM netflix WHERE rating IS NULL)
RETURNING *;
```

--duration nulls is 3. Delete them
```
DELETE FROM netflix 
WHERE show_id 
IN (SELECT show_id FROM netflix WHERE duration IS NULL);
```

Now run the query to show the number of nulls in each column to confirm there are none left. After this, run the query to confirm the row count in each column is the same

```
--Check to confirm the number of rows are the same (NO NULLS)

SELECT count(*) filter (where show_id IS NOT NULL) AS showid_nulls,
       count(*) filter (where type IS NOT NULL) AS type_nulls,
       count(*) filter (where title IS NOT NULL) AS title_nulls,
       count(*) filter (where director IS NOT NULL) AS director_nulls,
       count(*) filter (where country IS NOT NULL) AS country_nulls,
       count(*) filter (where date_added IS NOT NULL) AS date_addes_nulls,
       count(*) filter (where release_year IS NOT NULL) AS release_year_nulls,
       count(*) filter (where rating IS NOT NULL) AS rating_nulls,
       count(*) filter (where duration IS NOT NULL) AS duration_nulls,
       count(*) filter (where listed_in IS NOT NULL) AS listed_in_nulls
FROM netflix;

 --Total number of rows are the same in all columns
```

We can drop the description and movie_cast columns because they are not needed for our analysis or visualization task. 
```
--DROP unneeded columns

ALTER TABLE netflix
DROP COLUMN movie_cast, 
DROP COLUMN description;
```

Some of the rows in the country column have multiple countries; for my visualization, I only need one country per row to make my map visualization clean and easy. Therefore, I am going to split the country column and retain the first country from the left, which I believe is the original country of the movie
```
SELECT *,
       SPLIT_PART(country,',',1) AS countryy, 
       SPLIT_PART(country,',',2),
       SPLIT_PART(country,',',3),
       SPLIT_PART(country,',',4),
       SPLIT_PART(country,',',5),
       SPLIT_PART(country,',',6),
       SPLIT_PART(country,',',7),
       SPLIT_PART(country,',',8),
       SPLIT_PART(country,',',9),
       SPLIT_PART(country,',',10) 
FROM netflix;

-- NOW lets update the table

ALTER TABLE netflix 
ADD country1 varchar(500);
UPDATE netflix 
SET country1 = SPLIT_PART(country, ',', 1);

--This will create a column named country1 and update it with the first split country.
```

Delete the country column that has multiple country entries
```
--Delete column
ALTER TABLE netflix 
DROP COLUMN country;
```
Rename the country1 column to country
```
ALTER TABLE netflix 
RENAME COLUMN country1 TO country;
```

## Data Visualization
After cleaning, the dataset is ready for some analysis and visualization with Tableau. 

**Note: In the visualization captions, Contents means Movies and TV shows, and Content may mean either Movie or TV Show**. 

**Sheet 1. Content type in percentage**

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F1c95dee22870057541bc3c1cce7b1a36%2FType%20percent.png?generation=1661603826265148&alt=media)

This first sheet shows the two categories of content in the dataset, which are Movie and TV show. 
- As we can see, the majority of the content is Movie, which takes 69.9%. 
- There are more details in the tooltip, which shows the exact count of Movies and TV shows

**Sheet 2. Movie & TV Show by Country**

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F511f5a3f07f2fa9d8faea77d1dd21180%2FNumber%20of%20shows%20by%20map.png?generation=1661604888232420&alt=media)

This shows the total number of Movies and TV shows per country within the given period of time (2008 - 2021). This can be noted by the size of the coloured circle on the map. 
- We can see that the United States of America has the largest size, followed by India and the United Kingdom. 
- In the Tableau hosted dashboard/sheet, there is a filter for the years between 2008 and 2021 to calculate yearly records.

To give an alternate and clearer view, the Movie & TV shows by country bar chart is below
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F64e9f79965e62a4bb63b04acc835a07c%2FNumber%20of%20shows%20by%20bar.png?generation=1661609485785468&alt=media)

**Sheet 3. Number of Contents Added through the Years**

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2Fe02290507d0be382870f6651e3682cd1%2FNumber%20of%20Contents%20added%20by%20year.png?generation=1661605691430129&alt=media)

This time series chart shows the total number of contents added to Netflix through the given years (2008 - 2021)
- It shows that most movies and TV shows on Netflix were added in 2019
- In the Tableau sheet, there is a filter to see how many Movies and TV shows were added in each month of the year 

**Sheet 4. Top Directors**

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F0fa58900b62df123b690da63b6111a3a%2FDirector.png?generation=1661606812783874&alt=media)

This chart shows the top 10 directors with the most contents on Netflix, along with the count of Movies and TV shows in their catalogue. 
- We can see that most of these directors' contents are movies. 
- We can also note that the duo of Raul Campos and Jan Suter are fond of working together and have directed 18 movies on Netflix. 

**Sheet 5. Top Genres** 

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2Fbc458d5885c3d7bcd3e690962c5cc2c3%2FTop%20Genres.png?generation=1661607262740686&alt=media)

This chart shows the genres with the highest numbers on Netflix. 
- We can see that Drama & International Movies, followed by Documentary, have the highest number of contents on Netflix within the period.

**Sheet 6.
Top Ratings**

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F181df392cb0006559cd9fb19a29cadef%2FRating.png?generation=1661607535247137&alt=media)
 
Rating is a system to rate a motion picture's suitability for certain audiences based on its content. This chart shows the top ratings on Netflix
- We can note that most contents on Netflix are rated TV-MA. TV-MA is assigned in the United States by the TV Parental Guidelines and signifies content for mature audiences. 

**Sheet 7. Oldest Contents on Netflix by Content Release Year**

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F263493038e8dacd330c9e54aed2c467b%2FOldest%20shows%20on%20netflix.png?generation=1661607864455871&alt=media)

This table shows the 10 oldest movies and TV shows on Netflix
- The oldest is as old as 1925

**Sheet 8. Content Types over the Years**
![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F520da629aceef21a7af890198897c58e%2FContent%20Type%20over%20the%20years.png?generation=1661608071825961&alt=media)

This line chart compares the Movie and TV show contents added to Netflix through the years.
- We can see that more movies have always been added. 
- In 2013, the number of contents added to Netflix was almost the same for both, with Movies having 6 contents that year and TV shows having 5.
- It shows that in the first 5 years, only movies were added to Netflix. 

**Sheet 9. Release Years with Highest Contents**

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F063881abc476d466ad983f96b7f23284%2FRelease_years%20with%20highest%20movie.png?generation=1661608527082875&alt=media)

This chart shows the production years whose Movies and TV shows have the highest number of contents on Netflix. We focus on the top 10 release/production years. 
- We can see that from 2012 to 2018, Netflix favoured recent contents: each release year in that range has more contents on Netflix than older release years. Then in 2019 it started dropping; this may be due to Covid-19, but further analysis would be needed to determine this. 

And with this, I have come to the end of this exercise. As I said, this is just an exercise to test my skills as I look forward to getting better. Thanks for following through. Cheers!
Kaggle dataset identifier: netflix-data-cleaning-analysis-and-visualization
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # data visualization

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# read data csv
df = pd.read_csv(
    "/kaggle/input/netflix-data-cleaning-analysis-and-visualization/netflix1.csv"
)

# look into dataframe
df.head()

# Summary
# * show_id: netflix show id
# * type: type of show (movie or TV show)
# * title: title of the show
# * director: name of director
# * country: country origin of the show
# * date_added: recorded date when the show was added to netflix
# * release_year: year when the show was released
# * rating: the age restriction
# * duration: duration of the show
# * listed_in: the show category

# dataframe shape
df.shape

# dataframe information
df.info()

# We see that one feature is numeric (int64) and 9 features are object. In addition, we can also identify whether there are any missing values. Here, there are none, because each column contains 8790 observations, the same number of rows we saw before with shape.
# One strange thing is that date_added is object; it may be better to change it into string.

# inspect any null value
df.describe(include="all")

# look into countries in the list
df["country"].unique()

# change the data type of date_added from object to string and extract the year into a different column
df["date_added"] = df["date_added"].astype("string")
df["year_added"] = df["date_added"].str.extract(r"(\d{4})", expand=True)
df["year_added"] = df["year_added"].astype("int64")
df.info()

# We can see the 'Not Given' country name in the second row above. It means that the column is not empty, but the origin country of several shows is unknown.

df.nunique(axis=0)

# There are only two types of shows in the dataframe, and the shows are from 86 countries. We can draw the categorical data from type, country, release year, and rating.
# **Next, we shall see from data visualization.**

# see the proportion of the types of show
y = df.groupby("type")["show_id"].count()
labels = ["Movie", "TV Show"]

plt.pie(y, labels=labels, autopct="%1.1f%%", startangle=90)
plt.title("The proportion of the type of shows in Netflix")
plt.show()

# Movies dominate Netflix at approximately 70% of the total.

# top ten countries with the most shows
per_country = df.groupby("country")["show_id"].count()
per_country.sort_values(ascending=False).head(10).plot(kind="bar")
plt.title("Top ten countries with the most shows on Netflix")
plt.xlabel("country")
plt.ylabel("number of shows")
plt.show()

# The majority of shows on Netflix originate from the United States, with roughly 3300 shows in total, followed by India with around 1000 shows, and the UK with about 200 fewer shows than India. Apart from these three countries, the other countries on the list each have fewer than 500 shows on Netflix.
country_n_year = df.groupby(["country", "release_year"])["show_id"].count()
labels = [
    "United States",
    "India",
    "United Kingdom",
    "Pakistan",
    "Canada",
    "Japan",
    "South Korea",
    "France",
    "Spain",
]
selected = country_n_year.loc[labels]
selected = selected.sort_index(
    level=["country", "release_year"], ascending=[False, True]
)
legenda = selected.index.unique("release_year").sort_values(ascending=True)

selected.unstack().plot(kind="bar", stacked=True)
plt.title(
    "The top 10 countries with the most shows on Netflix (displayed by year of release)"
)
plt.ylabel("number of shows")
plt.xlabel("countries")
plt.legend(
    title="year release",
    labels=legenda,
    bbox_to_anchor=(1.0, 1.0),
    loc="upper left",
    ncol=3,
    fontsize=8,
)
plt.show()

# rating
country_n_rating = df.groupby(["country", "rating"])["show_id"].count()
labels = [
    "United States",
    "India",
    "United Kingdom",
    "Pakistan",
    "Canada",
    "Japan",
    "South Korea",
    "France",
    "Spain",
]
selected = country_n_rating.loc[labels]

selected.unstack().plot(kind="bar", stacked=True)
plt.title(
    "Top 10 countries with most shows on Netflix differentiated by rating of the shows"
)
plt.ylabel("number of shows")
plt.xlabel("countries")
plt.legend(title="rating", bbox_to_anchor=(1.0, 1.0), loc="upper left")
plt.show()

# Overall, the top ten countries have more TV-MA ratings than any other. India has more TV-14 ratings than any other country, while the US has the most R ratings.

# in which year were the most shows available on Netflix produced?
country_year = df.groupby("release_year")["show_id"].count()
country_year_10 = country_year.sort_values(ascending=False).head(10)

plt.plot(country_year_10.sort_index(), marker="o")
plt.title("The year of production of the shows most available on Netflix")
plt.xlabel("release year")
plt.ylabel("number of shows")
plt.show()

# Of all the shows added to Netflix from all 86 countries, the most shows were released in 2018, which is twice the number of shows released in the most recent year.

# are shows available on Netflix in the same year they were produced?
# comparing the date added and year released
df["year_diff"] = df.apply(
    lambda x: True if x["release_year"] == x["year_added"] else False, axis=1
)
print(df["year_diff"].value_counts())

# Netflix first started in **1997** as a **website service** that allowed people to rent DVDs online and receive them through the mail. However, Netflix launched their **streaming** platform in **2007**. On January 6, 2016, Netflix went live in 130 countries simultaneously.
# We can see from the result that far more shows were added after their release year.

# look into details on the False category
year_false = df[df["year_diff"] == False]

plt.scatter(data=year_false, y="release_year", x="year_added")
plt.ylabel("release year")
plt.xlabel("year added")
plt.show()

# A lot of shows started to be added to the Netflix streaming platform after 2016. Even old shows from 1960 and earlier were added to Netflix many years later.

# directors with the most shows available on Netflix
df.groupby("director")["show_id"].count().sort_values(ascending=False).head(10)
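The `year_diff` column above is built with a row-wise `apply`. A minimal sketch of the equivalent vectorized comparison, which produces the same boolean Series without per-row Python overhead:

# Element-wise comparison of two columns yields a boolean Series directly,
# so the apply/lambda above can be replaced by a single vectorized expression.
df["year_diff"] = df["release_year"] == df["year_added"]
print(df["year_diff"].value_counts())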
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/723/129723093.ipynb
netflix-data-cleaning-analysis-and-visualization
ariyoomotade
[{"Id": 129723093, "ScriptId": 38063008, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14103493, "CreationDate": "05/16/2023 03:18:17", "VersionNumber": 3.0, "Title": "Data analysis - Netflix", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 156.0, "LinesInsertedFromPrevious": 34.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 122.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186063376, "KernelVersionId": 129723093, "SourceDatasetVersionId": 4123716}]
[{"Id": 4123716, "DatasetId": 2437124, "DatasourceVersionId": 4180064, "CreatorUserId": 10322697, "LicenseName": "CC0: Public Domain", "CreationDate": "08/26/2022 09:25:43", "VersionNumber": 1.0, "Title": "Netflix Data: Cleaning, Analysis and Visualization", "Slug": "netflix-data-cleaning-analysis-and-visualization", "Subtitle": "Cleaning and Visualization with Pgsql and Tableau", "Description": "Netflix is a popular streaming service that offers a vast catalog of movies, TV shows, and original contents. This dataset is a cleaned version of the original version which can be found [here](https://www.kaggle.com/datasets/shivamb/netflix-shows). The data consist of contents added to Netflix from 2008 to 2021. The oldest content is as old as 1925 and the newest as 2021. This dataset will be cleaned with PostgreSQL and visualized with Tableau. The purpose of this dataset is to test my data cleaning and visualization skills. The cleaned data can be found below and the Tableau dashboard can be found [here](https://public.tableau.com/app/profile/abdulrasaq.ariyo/viz/NetflixTVShowsMovies_16615029026580/NetflixDashboard) . \n\n## Data Cleaning\nWe are going to:\n1. Treat the Nulls \n2. Treat the duplicates\n3. Populate missing rows\n4. Drop unneeded columns\n5. Split columns\nExtra steps and more explanation on the process will be explained through the code comments\n\n```\n--View dataset\n\nSELECT * \nFROM netflix;\n\n```\n\n```\n--The show_id column is the unique id for the dataset, therefore we are going to check for duplicates\n \nSELECT show_id, COUNT(*) \nFROM netflix \nGROUP BY show_id \nORDER BY show_id DESC;\n\n--No duplicates\n```\n\n```\n--Check null values across columns\n\nSELECT COUNT(*) FILTER (WHERE show_id IS NULL) AS showid_nulls,\n COUNT(*) FILTER (WHERE type IS NULL) AS type_nulls,\n COUNT(*) FILTER (WHERE title IS NULL) AS title_nulls,\n COUNT(*) FILTER (WHERE director IS NULL) AS director_nulls,\n\t COUNT(*) FILTER (WHERE movie_cast IS NULL) AS movie_cast_nulls,\n\t COUNT(*) FILTER (WHERE country IS NULL) AS country_nulls,\n COUNT(*) FILTER (WHERE date_added IS NULL) AS date_addes_nulls,\n COUNT(*) FILTER (WHERE release_year IS NULL) AS release_year_nulls,\n COUNT(*) FILTER (WHERE rating IS NULL) AS rating_nulls,\n\t COUNT(*) FILTER (WHERE duration IS NULL) AS duration_nulls,\n COUNT(*) FILTER (WHERE listed_in IS NULL) AS listed_in_nulls,\n\t COUNT(*) FILTER (WHERE description IS NULL) AS description_nulls\nFROM netflix;\n```\n```\nWe can see that there are NULLS. \ndirector_nulls = 2634\nmovie_cast_nulls = 825\ncountry_nulls = 831\ndate_added_nulls = 10\nrating_nulls = 4\nduration_nulls = 3 \n```\n\nThe director column nulls is about 30% of the whole column, therefore I will not delete them. I will rather find another column to populate it. 
To populate the director column, we want to find out if there is relationship between movie_cast column and director column\n\n\n``` \n-- Below, we find out if some directors are likely to work with particular cast\n\nWITH cte AS\n(\nSELECT title, CONCAT(director, '---', movie_cast) AS director_cast \nFROM netflix\n)\n\nSELECT director_cast, COUNT(*) AS count\nFROM cte\nGROUP BY director_cast\nHAVING COUNT(*) &gt; 1\nORDER BY COUNT(*) DESC;\n\nWith this, we can now populate NULL rows in directors \nusing their record with movie_cast \n```\n```\nUPDATE netflix \nSET director = 'Alastair Fothergill'\nWHERE movie_cast = 'David Attenborough'\nAND director IS NULL ;\n\n--Repeat this step to populate the rest of the director nulls\n--Populate the rest of the NULL in director as \"Not Given\"\n\nUPDATE netflix \nSET director = 'Not Given'\nWHERE director IS NULL;\n\n--When I was doing this, I found a less complex and faster way to populate a column which I will use next\n```\n\nJust like the director column, I will not delete the nulls in country. Since the country column is related to director and movie, we are going to populate the country column with the director column\n\n```\n--Populate the country using the director column\n\nSELECT COALESCE(nt.country,nt2.country) \nFROM netflix AS nt\nJOIN netflix AS nt2 \nON nt.director = nt2.director \nAND nt.show_id &lt;&gt; nt2.show_id\nWHERE nt.country IS NULL;\nUPDATE netflix\nSET country = nt2.country\nFROM netflix AS nt2\nWHERE netflix.director = nt2.director and netflix.show_id &lt;&gt; nt2.show_id \nAND netflix.country IS NULL;\n\n\n--To confirm if there are still directors linked to country that refuse to update\n\nSELECT director, country, date_added\nFROM netflix\nWHERE country IS NULL;\n\n--Populate the rest of the NULL in director as \"Not Given\"\n\nUPDATE netflix \nSET country = 'Not Given'\nWHERE country IS NULL;\n```\n\nThe date_added rows nulls is just 10 out of over 8000 rows, deleting them cannot affect our analysis or visualization\n\n```\n--Show date_added nulls\n\nSELECT show_id, date_added\nFROM netflix_clean\nWHERE date_added IS NULL;\n\n--DELETE nulls\n\nDELETE FROM netflix\nWHERE show_id \nIN ('6797', 's6067', 's6175', 's6807', 's6902', 's7255', 's7197', 's7407', 's7848', 's8183');\n\n```\n\nrating nulls is 4. Delete them\n```\n--Show rating NULLS\n\nSELECT show_id, rating\nFROM netflix_clean\nWHERE date_added IS NULL;\n\n--Delete the nulls, and show deleted fields\nDELETE FROM netflix \nWHERE show_id \nIN (SELECT show_id FROM netflix WHERE rating IS NULL)\nRETURNING *;\n```\n\n--duration nulls is 4. Delete them\n```\n\nDELETE FROM netflix \nWHERE show_id \nIN (SELECT show_id FROM netflix WHERE duration IS NULL);\n```\nNow run the query to show the number of nulls in each column to confirm if there are still nulls. 
After this, run the query to confirm the row number in each column is the same\n\n```\n--Check to confirm the number of rows are the same(NO NULL)\n\nSELECT count(*) filter (where show_id IS NOT NULL) AS showid_nulls,\n count(*) filter (where type IS NOT NULL) AS type_nulls,\n count(*) filter (where title IS NOT NULL) AS title_nulls,\n count(*) filter (where director IS NOT NULL) AS director_nulls,\n\t count(*) filter (where country IS NOT NULL) AS country_nulls,\n count(*) filter (where date_added IS NOT NULL) AS date_addes_nulls,\n count(*) filter (where release_year IS NOT NULL) AS release_year_nulls,\n count(*) filter (where rating IS NOT NULL) AS rating_nulls,\n\t count(*) filter (where duration IS NOT NULL) AS duration_nulls,\n count(*) filter (where listed_in IS NOT NULL) AS listed_in_nulls\nFROM netflix;\n\n --Total number of rows are the same in all columns\n```\nWe can drop the description and movie_cast column because they are not needed for our analysis or visualization task. \n```\n--DROP unneeded columns\n\nALTER TABLE netflix\nDROP COLUMN movie_cast, \nDROP COLUMN description;\n```\nSome of the rows in country column has multiple countries, for my visualization, I only need one country per row to make my map visualization clean and easy. Therefore, I am going to split the country column and retain the first country by the left which I believe is the original country of the movie\n```\nSELECT *,\n\t SPLIT_PART(country,',',1) AS countryy, \n SPLIT_PART(country,',',2),\n\t SPLIT_PART(country,',',4),\n\t SPLIT_PART(country,',',5),\n\t SPLIT_PART(country,',',6),\n\t SPLIT_PART(country,',',7),\n\t SPLIT_PART(country,',',8),\n\t SPLIT_PART(country,',',9),\n\t SPLIT_PART(country,',',10) \n\t \nFROM netflix;\n\t \n-- NOW lets update the table\n\nALTER TABLE netflix \nADD country1 varchar(500);\nUPDATE netflix \nSET country1 = SPLIT_PART(country, ',', 1);\n\n--This will create a column named country1 and Update it with the first split country.\n```\n\nDelete the country column that has multiple country entries\n```\n--Delete column\nALTER TABLE netflix \nDROP COLUMN country;\n```\nRename the country1 column to country\n```\nALTER TABLE netflix \nRENAME COLUMN country1 TO country;\n```\n\n## Data Visualization\nAfter cleaning, the dataset is set for some analysis and visualization with Tableau. \n\n**Note: In the visualization captions, Contents means Movies and TV shows, and Content may either mean Movie or TV Show**. \n\n**Sheet 1. Content type in percentage**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F1c95dee22870057541bc3c1cce7b1a36%2FType%20percent.png?generation=1661603826265148&alt=media)\n\nThis first sheet shows the two categories of content in the dataset which are Movie and Tv show. \n- As we can see the majority of the content is Movie which takes 69.9%. \n- There are more details in the tooltip which shows the exact count of Movie and Tv show\n\n\n**Sheet 2. Movie & TV Show by Country**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F511f5a3f07f2fa9d8faea77d1dd21180%2FNumber%20of%20shows%20by%20map.png?generation=1661604888232420&alt=media)\n\nThis shows the the total amount of Movies and Tv shows per country within the given period of time(2008 - 2021). This can be noted by the size of the coloured circle in the map. \n- We can see that the United State of America has the largest size, followed by India and the United Kingdom. 
\n- In the Tableau hosted dashboard/sheet, there is a filter for the years between 2008 and 2021 to calculate yearly record.\n\n To give an alternate and a clearer view. Movie & TV shows by country bar chart is below\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F64e9f79965e62a4bb63b04acc835a07c%2FNumber%20of%20shows%20by%20bar.png?generation=1661609485785468&alt=media)\n\n\n**Sheet 3. Number of Contents Added through the Years**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2Fe02290507d0be382870f6651e3682cd1%2FNumber%20of%20Contents%20added%20by%20year.png?generation=1661605691430129&alt=media)\n\nThis time series chart shows the total number of contents added to Netflix all through the given years (2008 - 2021)\n- It shows that most movies and tv shows on Netflix were added in 2019\n- In the Tableau sheet, there is a filter to know how much Movies and Tv shows were added in each month of the year \n\n\n**Sheet 4. Top Directors**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F0fa58900b62df123b690da63b6111a3a%2FDirector.png?generation=1661606812783874&alt=media)\n\nThis chart shows the top 10 directors with most contents on Netflix. This char shows the count of Movie and Tv shows in their catalouge. \n- We can see that most of these directors contents are movies. \n- We can also note that the duo of Raul Campos and Jan Suter are fond of working together and have directed 18 movies on Netflix. \n\n\n**Sheet 5. Top Genres** \n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2Fbc458d5885c3d7bcd3e690962c5cc2c3%2FTop%20Genres.png?generation=1661607262740686&alt=media)\n\nThis chart shows the genres with the highest numbers on Netflix. \n- We can see that Drama & International movies followed by Documentary have the highest number of contents on Netflix within the period.\n\n\n**Sheet 6. Top Ratings**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F181df392cb0006559cd9fb19a29cadef%2FRating.png?generation=1661607535247137&alt=media)\n \nRating is a system to rate motion picture's suitability for certain audiences based on its content. This chart shows the top ratings on Netflix\n-We can note that most contents on Netflix are rated TV-MA. TV-MA in the United States by the TV Parental Guidelines signifies content for mature audiences. \n\n\n**Sheet 7. Oldest Contents on Netflix by Content Release year**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F263493038e8dacd330c9e54aed2c467b%2FOldest%20shows%20on%20netflix.png?generation=1661607864455871&alt=media)\n\nThis table shows the 10 oldest movies and tv shows on Netflix\n- The oldest is as old as 1925\n\n**Sheet 8. Content Types over the Years**\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F520da629aceef21a7af890198897c58e%2FContent%20Type%20over%20the%20years.png?generation=1661608071825961&alt=media)\n\nThis line chart compares the Movie and Tv shows contents added to Netflix all through the years.\n- We can see that more movies have always been added. \n- In 2013, the number of contents added to Netflix for both were almost the same with Movies having 6 contents that year and Tv shows having 5.\n- It shows that in the first 5 years, only movies were added to Netflix. \n\n\n**Sheet 9. 
Release Years with Highest Contents**\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10322697%2F063881abc476d466ad983f96b7f23284%2FRelease_years%20with%20highest%20movie.png?generation=1661608527082875&alt=media)\n\nThis chart shows the Movies and Tv shows production year which has with highest contents on Netflix. We focus on the top 10 release year/production year. \n-We can see that from 2012 to 2018, Netflix added most recent contents, they made sure most recent contents per release year are higher than the older release year contents. Then in 2019, it started dropping, this may be due to the Covid-19, but further analysis may be needed to determine this. \n\n And with this, I have come to the end of this exercise. As I said this is just an exercise to test my skills as I look forward to be better. Thanks for following through. Cheers!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2437124, "CreatorUserId": 10322697, "OwnerUserId": 10322697.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4123716.0, "CurrentDatasourceVersionId": 4180064.0, "ForumId": 2464656, "Type": 2, "CreationDate": "08/26/2022 09:25:43", "LastActivityDate": "08/26/2022", "TotalViews": 96354, "TotalDownloads": 16114, "TotalVotes": 270, "TotalKernels": 23}]
[{"Id": 10322697, "UserName": "ariyoomotade", "DisplayName": "Abdulrasaq Ariyo", "RegisterDate": "04/22/2022", "PerformanceTier": 0}]
false
1
1,958
0
6,189
1,958
129624430
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split

wine = pd.read_csv("winequality-red.csv", sep=";")
wine.head()
wine.info()
wine.isnull().sum()

# preprocessing: bin the numeric quality score into two labels
bins = (2, 6.5, 8)
group_names = ["bad", "good"]
wine["quality"] = pd.cut(wine["quality"], bins=bins, labels=group_names)
wine["quality"].unique()

label_quality = LabelEncoder()
wine["quality"] = label_quality.fit_transform(wine["quality"])
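The notebook imports `train_test_split`, `StandardScaler`, and several classifiers but stops right after encoding the target. A minimal sketch of the natural continuation, using only the tools already imported above (the hyperparameters are placeholders, not the author's choices):

# Split features/target, scale, fit one of the imported classifiers, report.
X = wine.drop("quality", axis=1)
y = wine["quality"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)  # fit the scaler on the training split only

rfc = RandomForestClassifier(n_estimators=200, random_state=42)
rfc.fit(X_train, y_train)
print(classification_report(y_test, rfc.predict(X_test)))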
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/624/129624430.ipynb
null
null
[{"Id": 129624430, "ScriptId": 38546090, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12883251, "CreationDate": "05/15/2023 10:06:45", "VersionNumber": 2.0, "Title": "notebookca2c852f82", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 59.0, "LinesInsertedFromPrevious": 17.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 42.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
396
0
396
396
129624823
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

df_train = pd.read_csv("/kaggle/input/titanic/train.csv")
df_train.head()

# Find the number of null values in each column
df_train.isna().sum()

# Find the maximum and minimum age
print(df_train.Age.max())
print(df_train.Age.min())

# Find the mode value of Age (occurring most often)
df_train.Age.mode().values[0]

df_train.describe()
df_train.dtypes

# How many passengers have this most frequent age?
df_train["PassengerId"][df_train["Age"] == df_train.Age.mode().values[0]].count()

df_train.Embarked.unique()
df_train.describe()

# How will you deal with null values?
# 1. Remove column
# 2. Remove record
# 3. Replace with some value
# Also mention the reason for each decision in comments.
# Solution:
# 1. For Age, we can replace nulls with the average age; since about 20 percent
#    of the values are null, we cannot discard the column or the records
# 2. We can drop the Cabin column, as 77% of its values are null
# 3. For Embarked, we can drop the rows, as only 2 records are null
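The cleaning plan in the final comment is stated but not executed. A minimal sketch implementing exactly those three decisions on a copy of the dataframe:

# Implement the plan above: fill Age with the mean, drop Cabin, drop rows
# where Embarked is null, then verify no nulls remain.
df_clean = df_train.copy()
df_clean["Age"] = df_clean["Age"].fillna(df_clean["Age"].mean())
df_clean = df_clean.drop(columns=["Cabin"])
df_clean = df_clean.dropna(subset=["Embarked"])
print(df_clean.isna().sum())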
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/624/129624823.ipynb
null
null
[{"Id": 129624823, "ScriptId": 38545640, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3136640, "CreationDate": "05/15/2023 10:10:11", "VersionNumber": 1.0, "Title": "notebook055bc8ca9c", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 65.0, "LinesInsertedFromPrevious": 65.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
485
0
485
485
129653895
# FathomNet 2023
# Shifting seas, shifting species: Out-of-sample detection in the deep ocean
#
# # Introduction
# ## About Files
# * `multilabel_classification/train.csv` - csv list of training images and categories
# * `object_detection/train.json` - the training images, annotations, and categories in COCO formatted json
# * `object_detection/eval.json` - the evaluation images in COCO formatted json
# * `sample_submission.csv` - a sample submission file in the correct format
# * `category_key.csv` - key mapping numerical index to category and supercategory name
# * `demo_download.ipynb` - python notebook demonstrating download script
# * `download_images.py` - python script to download imagery from FathomNet using COCO formatted json
# * `requirements.txt` - python requirements to run the download script

import os
import numpy as np
import pandas as pd
import json
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from tqdm.notebook import tqdm
from pathlib import Path

import warnings

warnings.filterwarnings("ignore")

# seaborn
custom_params = {
    "lines.linewidth": 1,
}
blues_palette = sns.color_palette("Blues_r", n_colors=20)
reds_palette = sns.color_palette("Reds_r", n_colors=20)
greys_palette = sns.color_palette("Greys", n_colors=10)
blue = blues_palette[1]
red = reds_palette[1]
two_colors = [blue, red]
sns.set()
sns.set_theme(style="whitegrid", palette=blues_palette, rc=custom_params)


# Define variables (grouped in a class so the Cfg.* references below resolve)
class Cfg:
    INDEX = "id"
    INPUT_ROOT = Path("/kaggle/input/fathomnet-out-of-sample-detection")
    OUTPUT_ROOT = Path("/kaggle/working/")
    TRAIN_OUTPUT_IMAGE_ROOT = Path("/kaggle/working/images/train")
    EVAL_OUTPUT_IMAGE_ROOT = Path("/kaggle/working/images/eval")
    TRAIN_FILE = INPUT_ROOT / "multilabel_classification/train.csv"
    CATEGORY_KEY_FILE = INPUT_ROOT / "category_key.csv"
    SAMPLE_SUBMISSION_FILE = INPUT_ROOT / "sample_submission.csv"
    EVAL_JSON_FILE = INPUT_ROOT / "object_detection/eval.json"
    TRAIN_JSON_FILE = INPUT_ROOT / "object_detection/train.json"
    ANNOTATION_FILE = OUTPUT_ROOT / "annotation.csv"
    TRAIN_IMAGE_DATA_FILE = OUTPUT_ROOT / "train_image_data.csv"
    EVAL_IMAGE_DATA_FILE = OUTPUT_ROOT / "eval_image_data.csv"


# # Read Data
def read_train_data(file=Cfg.TRAIN_FILE, index_col=Cfg.INDEX):
    return pd.read_csv(file).set_index(index_col)


def read_category_keys(file=Cfg.CATEGORY_KEY_FILE, index_col=Cfg.INDEX):
    return pd.read_csv(file).set_index(index_col)


def read_sample_submission(file=Cfg.SAMPLE_SUBMISSION_FILE, index_col=Cfg.INDEX):
    return pd.read_csv(file).set_index(index_col)


def read_json(file):
    """Read a json file."""
    with open(file) as f:
        return json.load(f)


def read_object_detection_train():
    return read_json(Cfg.TRAIN_JSON_FILE)


def read_object_detection_eval():
    return read_json(Cfg.EVAL_JSON_FILE)
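With the readers in place, a short usage sketch (an assumption about how they are meant to be combined, since the notebook stops here): load the csv inputs and the COCO-formatted training json, then peek at the json's top-level lists.

# COCO-formatted jsons expose "images", "annotations" and "categories" lists.
train_df = read_train_data()
category_df = read_category_keys()
coco_train = read_object_detection_train()

print(train_df.shape, category_df.shape)
print({key: len(val) for key, val in coco_train.items() if isinstance(val, list)})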
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/653/129653895.ipynb
null
null
[{"Id": 129653895, "ScriptId": 38554283, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9338538, "CreationDate": "05/15/2023 14:00:05", "VersionNumber": 1.0, "Title": "notebook839fa56005", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 92.0, "LinesInsertedFromPrevious": 92.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
906
0
906
906
129729575
# **print common elements**
a = [1, 2, 3]
b = [2, 3, 4]
for i in a:
    for j in b:
        if i == j:
            print(i)
            break

# **remove duplicate values**
a = [1, 2, 3, 4, 5, 4, 3, 2, 6, 7, 2, 1]
# Removing items from a list while looping over it skips elements, and the
# original a.remove(j) deleted whatever value j happened to hold after the
# inner loop. Building a separate result list keeps the nested-loop style
# and is correct.
result = []
for i in a:
    count = 0
    for j in result:
        if i == j:
            count = count + 1
    if count == 0:  # keep i only the first time we see it
        result.append(i)
print(result)
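# For reference, Python's built-ins express both exercises in a line each —
# set intersection finds the common elements (order is not preserved), and
# dict.fromkeys() de-duplicates while keeping first-seen order.
a = [1, 2, 3]
b = [2, 3, 4]
print(sorted(set(a) & set(b)))  # common elements -> [2, 3]

a = [1, 2, 3, 4, 5, 4, 3, 2, 6, 7, 2, 1]
print(list(dict.fromkeys(a)))  # duplicates removed, order kept -> [1, 2, 3, 4, 5, 6, 7]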
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/729/129729575.ipynb
null
null
[{"Id": 129729575, "ScriptId": 38580089, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15022629, "CreationDate": "05/16/2023 04:41:36", "VersionNumber": 1.0, "Title": "list assignment 2", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 21.0, "LinesInsertedFromPrevious": 21.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# **print common elements**
a = [1, 2, 3]
b = [2, 3, 4]
for i in a:
    for j in b:
        if i == j:
            print(i)
            break

# **remove duplicate values**
a = [1, 2, 3, 4, 5, 4, 3, 2, 6, 7, 2, 1]
# Removing items from a list while looping over it skips elements, and the
# original a.remove(j) deleted whatever value j happened to hold after the
# inner loop. Building a separate result list keeps the nested-loop style
# and is correct.
result = []
for i in a:
    count = 0
    for j in result:
        if i == j:
            count = count + 1
    if count == 0:  # keep i only the first time we see it
        result.append(i)
print(result)
false
0
152
0
152
152
129380612
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# ### Import
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
import string
import re

nltk.download("stopwords", quiet=True)  # fetch the stopword list if it is not cached yet
STOPWORDS = set(stopwords.words("english"))  # build the set once instead of once per tweet

train_df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
train_df.head()
train_df.shape
train_df.index.duplicated().sum()
train_df["target"].isna().sum()
train_df["target"].value_counts(normalize=True) * 100
train_df.dtypes
train_df.head()

from collections import Counter

# Step 1: Calculate word frequency over the training tweets
word_freq = Counter(" ".join(train_df["text"]).split())
# Step 2: Identify the words that appear only once
rare_words = [word for word, freq in word_freq.items() if freq <= 1]

import contractions

# from spellchecker import SpellChecker

def clean_tweet(text):
    # removes \n and lowercases
    cleaned_text = text.replace("\n", " ").lower()
    # Remove HTML tags, URLs, e-mail IDs and Twitter mentions *before*
    # stripping punctuation — otherwise their '/', ':', '.' and '@'
    # characters are already gone and these patterns can never match
    cleaned_text = re.sub(r"<.*?>", "", cleaned_text)
    cleaned_text = re.sub(r"https?://\S+|www\.\S+", "", cleaned_text)
    cleaned_text = re.sub(r"[\w.-]+@[\w.-]+\.\w+", "", cleaned_text)
    cleaned_text = re.sub(r"@\w+", "", cleaned_text)
    # Expand_Contractions (while apostrophes are still present)
    expanded_words = []
    for word in cleaned_text.split():
        # using contractions.fix to expand the shortened words
        expanded_words.append(contractions.fix(word))
    cleaned_text = " ".join(expanded_words)
    # Remove_Punctuations
    punctuations = "@#!?+&*[]-%.:/();$=><|{}^" + "'`"
    for p in punctuations:
        cleaned_text = re.sub(re.escape(p), " ", cleaned_text)
    # Remove_Stopwords
    cleaned_text = " ".join(
        word for word in cleaned_text.split() if word not in STOPWORDS
    )
    # Remove_emojis
    emoji_pattern = re.compile(
        "["
        "\U0001F600-\U0001F64F"  # emoticons
        "\U0001F300-\U0001F5FF"  # symbols & pictographs
        "\U0001F680-\U0001F6FF"  # transport & map symbols
        "\U0001F1E0-\U0001F1FF"  # flags (iOS)
        "\U00002500-\U00002BEF"  # chinese char
        "\U00002702-\U000027B0"
        "\U000024C2-\U0001F251"
        "\U0001f926-\U0001f937"
        "\U00010000-\U0010ffff"
        "\u2640-\u2642"
        "\u2600-\u2B55"
        "\u200d"
        "\u23cf"
        "\u23e9"
        "\u231a"
        "\ufe0f"  # dingbats
        "\u3030"
        "]+",
        flags=re.UNICODE,
    )
    cleaned_text = emoji_pattern.sub(r"", cleaned_text)
    # Remove_Unicode_Characters
    cleaned_text = cleaned_text.encode("ascii", "ignore").decode()
    # Remove_Digits
    cleaned_text = re.sub(r"\w*\d+\w*", "", cleaned_text)
    return cleaned_text

train_df["clean_text"] = train_df["text"].apply(clean_tweet)
train_df
test_df["clean_text"] = test_df["text"].apply(clean_tweet)
test_df

# ### Tensorflow
size_vocab = 10000
embedding_dim = 64
max_length = 24

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

tokenizer = Tokenizer(num_words=size_vocab, oov_token="<OOV>")
tokenizer.fit_on_texts(train_df["clean_text"])
train_sequences = tokenizer.texts_to_sequences(train_df["clean_text"])
test_sequences = tokenizer.texts_to_sequences(test_df["clean_text"])
padded_train_sequences = pad_sequences(
    train_sequences, maxlen=max_length, truncating="post", padding="post"
)
padded_test_sequences = pad_sequences(
    test_sequences, maxlen=max_length, truncating="post", padding="post"
)
print(padded_train_sequences.shape)
print(padded_test_sequences.shape)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense

model = Sequential()
model.add(Embedding(size_vocab, embedding_dim, input_length=max_length))
model.add(Flatten())
model.add(Dense(10, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["acc"])
model.summary()
model.fit(padded_train_sequences, train_df["target"], epochs=20)

# note: this block re-tokenizes the *raw* text and is not used for the final
# prediction below, which runs on the cleaned, padded test sequences
test_sen = test_df["text"]
test_sen = tokenizer.texts_to_sequences(test_sen)
padd_test_sen = pad_sequences(
    test_sen, maxlen=max_length, truncating="post", padding="post"
)
padd_test_sen.shape
padded_test_sequences.shape

rs = model.predict(padded_test_sequences)
rs.shape
# Apply threshold and convert to binary values (rs has shape (n, 1))
threshold = 0.5
binary_predictions = (rs > threshold).astype(int).ravel()
submission = pd.DataFrame({"id": test_df["id"], "target": binary_predictions})
submission
submission.to_csv("submission.csv", index=False)
rs
test_df
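# A tiny self-contained illustration of the Tokenizer -> pad_sequences
# pipeline used above, on made-up sentences, so the word index, the <OOV>
# handling and the padded shape are visible at a glance.
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

demo_tok = Tokenizer(num_words=50, oov_token="<OOV>")
demo_tok.fit_on_texts(["fire near the city", "flood warning issued"])
seqs = demo_tok.texts_to_sequences(["fire warning", "earthquake hit"])  # both test words of the 2nd sentence are unseen
print(demo_tok.word_index)  # word -> integer index; <OOV> gets index 1
print(pad_sequences(seqs, maxlen=4, padding="post"))  # unseen words map to the <OOV> index, rows padded to length 4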
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/380/129380612.ipynb
null
null
[{"Id": 129380612, "ScriptId": 38459966, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7042083, "CreationDate": "05/13/2023 09:50:11", "VersionNumber": 1.0, "Title": "NLP Beginer", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 195.0, "LinesInsertedFromPrevious": 195.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# ### Import
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
import string
import re

nltk.download("stopwords", quiet=True)  # fetch the stopword list if it is not cached yet
STOPWORDS = set(stopwords.words("english"))  # build the set once instead of once per tweet

train_df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
train_df.head()
train_df.shape
train_df.index.duplicated().sum()
train_df["target"].isna().sum()
train_df["target"].value_counts(normalize=True) * 100
train_df.dtypes
train_df.head()

from collections import Counter

# Step 1: Calculate word frequency over the training tweets
word_freq = Counter(" ".join(train_df["text"]).split())
# Step 2: Identify the words that appear only once
rare_words = [word for word, freq in word_freq.items() if freq <= 1]

import contractions

# from spellchecker import SpellChecker

def clean_tweet(text):
    # removes \n and lowercases
    cleaned_text = text.replace("\n", " ").lower()
    # Remove HTML tags, URLs, e-mail IDs and Twitter mentions *before*
    # stripping punctuation — otherwise their '/', ':', '.' and '@'
    # characters are already gone and these patterns can never match
    cleaned_text = re.sub(r"<.*?>", "", cleaned_text)
    cleaned_text = re.sub(r"https?://\S+|www\.\S+", "", cleaned_text)
    cleaned_text = re.sub(r"[\w.-]+@[\w.-]+\.\w+", "", cleaned_text)
    cleaned_text = re.sub(r"@\w+", "", cleaned_text)
    # Expand_Contractions (while apostrophes are still present)
    expanded_words = []
    for word in cleaned_text.split():
        # using contractions.fix to expand the shortened words
        expanded_words.append(contractions.fix(word))
    cleaned_text = " ".join(expanded_words)
    # Remove_Punctuations
    punctuations = "@#!?+&*[]-%.:/();$=><|{}^" + "'`"
    for p in punctuations:
        cleaned_text = re.sub(re.escape(p), " ", cleaned_text)
    # Remove_Stopwords
    cleaned_text = " ".join(
        word for word in cleaned_text.split() if word not in STOPWORDS
    )
    # Remove_emojis
    emoji_pattern = re.compile(
        "["
        "\U0001F600-\U0001F64F"  # emoticons
        "\U0001F300-\U0001F5FF"  # symbols & pictographs
        "\U0001F680-\U0001F6FF"  # transport & map symbols
        "\U0001F1E0-\U0001F1FF"  # flags (iOS)
        "\U00002500-\U00002BEF"  # chinese char
        "\U00002702-\U000027B0"
        "\U000024C2-\U0001F251"
        "\U0001f926-\U0001f937"
        "\U00010000-\U0010ffff"
        "\u2640-\u2642"
        "\u2600-\u2B55"
        "\u200d"
        "\u23cf"
        "\u23e9"
        "\u231a"
        "\ufe0f"  # dingbats
        "\u3030"
        "]+",
        flags=re.UNICODE,
    )
    cleaned_text = emoji_pattern.sub(r"", cleaned_text)
    # Remove_Unicode_Characters
    cleaned_text = cleaned_text.encode("ascii", "ignore").decode()
    # Remove_Digits
    cleaned_text = re.sub(r"\w*\d+\w*", "", cleaned_text)
    return cleaned_text

train_df["clean_text"] = train_df["text"].apply(clean_tweet)
train_df
test_df["clean_text"] = test_df["text"].apply(clean_tweet)
test_df

# ### Tensorflow
size_vocab = 10000
embedding_dim = 64
max_length = 24

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

tokenizer = Tokenizer(num_words=size_vocab, oov_token="<OOV>")
tokenizer.fit_on_texts(train_df["clean_text"])
train_sequences = tokenizer.texts_to_sequences(train_df["clean_text"])
test_sequences = tokenizer.texts_to_sequences(test_df["clean_text"])
padded_train_sequences = pad_sequences(
    train_sequences, maxlen=max_length, truncating="post", padding="post"
)
padded_test_sequences = pad_sequences(
    test_sequences, maxlen=max_length, truncating="post", padding="post"
)
print(padded_train_sequences.shape)
print(padded_test_sequences.shape)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense

model = Sequential()
model.add(Embedding(size_vocab, embedding_dim, input_length=max_length))
model.add(Flatten())
model.add(Dense(10, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["acc"])
model.summary()
model.fit(padded_train_sequences, train_df["target"], epochs=20)

# note: this block re-tokenizes the *raw* text and is not used for the final
# prediction below, which runs on the cleaned, padded test sequences
test_sen = test_df["text"]
test_sen = tokenizer.texts_to_sequences(test_sen)
padd_test_sen = pad_sequences(
    test_sen, maxlen=max_length, truncating="post", padding="post"
)
padd_test_sen.shape
padded_test_sequences.shape

rs = model.predict(padded_test_sequences)
rs.shape
# Apply threshold and convert to binary values (rs has shape (n, 1))
threshold = 0.5
binary_predictions = (rs > threshold).astype(int).ravel()
submission = pd.DataFrame({"id": test_df["id"], "target": binary_predictions})
submission
submission.to_csv("submission.csv", index=False)
rs
test_df
false
0
1,819
0
1,819
1,819
129380132
<jupyter_start><jupyter_text>Diabetes prediction dataset The **Diabetes prediction dataset** is a collection of medical and demographic data from patients, along with their diabetes status (positive or negative). The data includes features such as age, gender, body mass index (BMI), hypertension, heart disease, smoking history, HbA1c level, and blood glucose level. This dataset can be used to build machine learning models to predict diabetes in patients based on their medical history and demographic information. This can be useful for healthcare professionals in identifying patients who may be at risk of developing diabetes and in developing personalized treatment plans. Additionally, the dataset can be used by researchers to explore the relationships between various medical and demographic factors and the likelihood of developing diabetes. Kaggle dataset identifier: diabetes-prediction-dataset <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings("ignore") pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) # For Conversion of numerical columns to categorical columns. from sklearn.preprocessing import OneHotEncoder # Model Building from sklearn.linear_model import LogisticRegression from sklearn.ensemble import GradientBoostingClassifier from sklearn.tree import DecisionTreeClassifier # Metrics Evaluation from sklearn.metrics import accuracy_score, classification_report import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) filepath = "/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv" def data_import(filepath): df = pd.read_csv(filepath) return df df = data_import(filepath) df.head() df.info() duplicated_sum = df.duplicated().sum() print(f"No of Duplicate rows: {duplicated_sum} ") df1 = df.drop_duplicates() df1.shape df1.info() # Let's check for unique columns in object columns. o = df1.dtypes == "object" object_cols = o[o].index print(f"Categorical Columns : {object_cols}") def unique_cols(df, object_cols): for i in df[object_cols]: print(f"{i} : {df[i].unique()}") unique_cols(df1, object_cols) # Let's check for value counts in gender column. df1["gender"].value_counts() # Let's encode gender columns using label encoder. from sklearn.preprocessing import LabelEncoder le = LabelEncoder() df1["gender"] = le.fit_transform(df1["gender"]) # Check for value count in smoking_history. df1["smoking_history"].value_counts() # Let's encode smoking_history using map function. df1["smoking_history"] = df1["smoking_history"].map( {"No Info": 0, "never": 1, "former": 2, "current": 3, "not current": 4, "ever": 5} ) # Let's check correlation of columns with label. 
correlation = df1.corr()  # use the cleaned, label-encoded frame; df still holds the raw object columns
correlation
label_correlation = correlation["diabetes"].drop("diabetes")
sorted_correlation = label_correlation.abs().sort_values(ascending=False)
print(sorted_correlation)

features = df1[
    [
        "blood_glucose_level",
        "HbA1c_level",
        "age",
        "bmi",
        "hypertension",
        "heart_disease",
    ]
]
label = df1["diabetes"]

## Train test splitting
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(features, label, test_size=0.2)
print(f"x train shape : {x_train.shape}")
print(f"y train shape : {y_train.shape}")
print(f"x test shape : {x_test.shape}")
print(f"y test shape : {y_test.shape}")

pred_dict = {}

def model(modelname, x_train, x_test, y_train, y_test):
    model_build = modelname(random_state=42)
    model_build.fit(x_train, y_train)
    y_pred = model_build.predict(x_test)
    return y_pred

y_pred = model(LogisticRegression, x_train, x_test, y_train, y_test)
pred_dict["Logistic_y_pred"] = y_pred
y_pred = model(GradientBoostingClassifier, x_train, x_test, y_train, y_test)
pred_dict["Gradientboosting_y_pred"] = y_pred
y_pred = model(DecisionTreeClassifier, x_train, x_test, y_train, y_test)
pred_dict["DecisionTree_y_pred"] = y_pred

# Classification Metrics Evaluation function
metrics_dict = {}

def classification_metrics(y_test, y_pred):
    acc_score = accuracy_score(y_test, y_pred)
    print(f"Classification Report : \n{classification_report(y_test, y_pred)}")
    return acc_score

metrics_dict["Log_acc_score"] = classification_metrics(
    y_test, pred_dict["Logistic_y_pred"]
)
metrics_dict["Gradientboosting_acc_score"] = classification_metrics(
    y_test, pred_dict["Gradientboosting_y_pred"]
)
metrics_dict["DecisionTree_acc_score"] = classification_metrics(
    y_test, pred_dict["DecisionTree_y_pred"]
)
metrics_dict
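# The accuracies above come from a single 80/20 split, so they move from run
# to run; k-fold cross-validation gives a steadier estimate. A sketch with
# sklearn's cross_val_score on the same features/label built above.
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier

for clf in (
    LogisticRegression(max_iter=1000),
    GradientBoostingClassifier(random_state=42),
    DecisionTreeClassifier(random_state=42),
):
    scores = cross_val_score(clf, features, label, cv=5, scoring="accuracy")
    print(type(clf).__name__, round(scores.mean(), 4), "+/-", round(scores.std(), 4))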
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/380/129380132.ipynb
diabetes-prediction-dataset
iammustafatz
[{"Id": 129380132, "ScriptId": 38466525, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5820249, "CreationDate": "05/13/2023 09:45:10", "VersionNumber": 1.0, "Title": "Diabetes prediction dataset", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 154.0, "LinesInsertedFromPrevious": 154.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 185374364, "KernelVersionId": 129380132, "SourceDatasetVersionId": 5344155}]
[{"Id": 5344155, "DatasetId": 3102947, "DatasourceVersionId": 5417553, "CreatorUserId": 11427441, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "04/08/2023 06:11:45", "VersionNumber": 1.0, "Title": "Diabetes prediction dataset", "Slug": "diabetes-prediction-dataset", "Subtitle": "A Comprehensive Dataset for Predicting Diabetes with Medical & Demographic Data", "Description": "The **Diabetes prediction dataset** is a collection of medical and demographic data from patients, along with their diabetes status (positive or negative). The data includes features such as age, gender, body mass index (BMI), hypertension, heart disease, smoking history, HbA1c level, and blood glucose level. This dataset can be used to build machine learning models to predict diabetes in patients based on their medical history and demographic information. This can be useful for healthcare professionals in identifying patients who may be at risk of developing diabetes and in developing personalized treatment plans. Additionally, the dataset can be used by researchers to explore the relationships between various medical and demographic factors and the likelihood of developing diabetes.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3102947, "CreatorUserId": 11427441, "OwnerUserId": 11427441.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5344155.0, "CurrentDatasourceVersionId": 5417553.0, "ForumId": 3166206, "Type": 2, "CreationDate": "04/08/2023 06:11:45", "LastActivityDate": "04/08/2023", "TotalViews": 127619, "TotalDownloads": 24886, "TotalVotes": 309, "TotalKernels": 120}]
[{"Id": 11427441, "UserName": "iammustafatz", "DisplayName": "Mohammed Mustafa", "RegisterDate": "08/29/2022", "PerformanceTier": 0}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings("ignore") pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) # For Conversion of numerical columns to categorical columns. from sklearn.preprocessing import OneHotEncoder # Model Building from sklearn.linear_model import LogisticRegression from sklearn.ensemble import GradientBoostingClassifier from sklearn.tree import DecisionTreeClassifier # Metrics Evaluation from sklearn.metrics import accuracy_score, classification_report import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) filepath = "/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv" def data_import(filepath): df = pd.read_csv(filepath) return df df = data_import(filepath) df.head() df.info() duplicated_sum = df.duplicated().sum() print(f"No of Duplicate rows: {duplicated_sum} ") df1 = df.drop_duplicates() df1.shape df1.info() # Let's check for unique columns in object columns. o = df1.dtypes == "object" object_cols = o[o].index print(f"Categorical Columns : {object_cols}") def unique_cols(df, object_cols): for i in df[object_cols]: print(f"{i} : {df[i].unique()}") unique_cols(df1, object_cols) # Let's check for value counts in gender column. df1["gender"].value_counts() # Let's encode gender columns using label encoder. from sklearn.preprocessing import LabelEncoder le = LabelEncoder() df1["gender"] = le.fit_transform(df1["gender"]) # Check for value count in smoking_history. df1["smoking_history"].value_counts() # Let's encode smoking_history using map function. df1["smoking_history"] = df1["smoking_history"].map( {"No Info": 0, "never": 1, "former": 2, "current": 3, "not current": 4, "ever": 5} ) # Let's check correlation of columns with label. 
correlation = df1.corr()  # use the cleaned, label-encoded frame; df still holds the raw object columns
correlation
label_correlation = correlation["diabetes"].drop("diabetes")
sorted_correlation = label_correlation.abs().sort_values(ascending=False)
print(sorted_correlation)

features = df1[
    [
        "blood_glucose_level",
        "HbA1c_level",
        "age",
        "bmi",
        "hypertension",
        "heart_disease",
    ]
]
label = df1["diabetes"]

## Train test splitting
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(features, label, test_size=0.2)
print(f"x train shape : {x_train.shape}")
print(f"y train shape : {y_train.shape}")
print(f"x test shape : {x_test.shape}")
print(f"y test shape : {y_test.shape}")

pred_dict = {}

def model(modelname, x_train, x_test, y_train, y_test):
    model_build = modelname(random_state=42)
    model_build.fit(x_train, y_train)
    y_pred = model_build.predict(x_test)
    return y_pred

y_pred = model(LogisticRegression, x_train, x_test, y_train, y_test)
pred_dict["Logistic_y_pred"] = y_pred
y_pred = model(GradientBoostingClassifier, x_train, x_test, y_train, y_test)
pred_dict["Gradientboosting_y_pred"] = y_pred
y_pred = model(DecisionTreeClassifier, x_train, x_test, y_train, y_test)
pred_dict["DecisionTree_y_pred"] = y_pred

# Classification Metrics Evaluation function
metrics_dict = {}

def classification_metrics(y_test, y_pred):
    acc_score = accuracy_score(y_test, y_pred)
    print(f"Classification Report : \n{classification_report(y_test, y_pred)}")
    return acc_score

metrics_dict["Log_acc_score"] = classification_metrics(
    y_test, pred_dict["Logistic_y_pred"]
)
metrics_dict["Gradientboosting_acc_score"] = classification_metrics(
    y_test, pred_dict["Gradientboosting_y_pred"]
)
metrics_dict["DecisionTree_acc_score"] = classification_metrics(
    y_test, pred_dict["DecisionTree_y_pred"]
)
metrics_dict
false
0
1,228
2
1,419
1,228
129214662
<jupyter_start><jupyter_text>Starbucks Nutrition Facts ``` Nutrition facts for several Starbucks food items ``` | Column | Description | | ------- | ------------------------------------------------------------ | | item | The name of the food item. | | calories| The amount of calories in the food item. | | fat | The quantity of fat in grams present in the food item. | | carb | The amount of carbohydrates in grams found in the food item. | | fiber | The quantity of dietary fiber in grams in the food item. | | protein | The amount of protein in grams contained in the food item. | | type | The category or type of food item (bakery, bistro box, hot breakfast, parfait, petite, salad, or sandwich). | Kaggle dataset identifier: starbucks-nutrition <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from pprint import pprint # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Starbucks Nutritional Information # Starbucks provides comprehensive nutritional information for their food and beverage offerings, allowing customers to make informed choices based on their dietary preferences and health goals. The nutritional information includes details such as calories, fat content, carbohydrates, fiber, and protein for each menu item. # By making this information readily available, Starbucks aims to empower individuals to make choices that align with their nutritional needs and preferences. Whether you're looking for lower-calorie options, watching your fat or carb intake, or seeking protein-rich alternatives, the nutritional information provided by Starbucks helps you navigate their menu with confidence. # ## Data Coverage # The data encompasses a range of food items, from baked goods and bistro boxes to hot breakfast items, parfaits, petite treats, salads, and sandwiches. Each item is categorized based on its type, making it easier for customers to find options that suit their dietary requirements or preferences. # ## Transparency and Informed Decisions # By offering transparent and detailed nutritional information, Starbucks reinforces its commitment to supporting customers in making informed decisions about their food choices. Whether you're enjoying a coffee break or grabbing a quick bite, the nutritional information empowers you to enjoy Starbucks' offerings while being mindful of your nutritional goals. # --- # ## Data Dictionary # The data consists of nutrition facts for several Starbucks food items. It is organized in the form of a data frame with 77 observations and 7 variables. # ### Variables # - **item**: The name of the food item (string). # - **calories**: The number of calories in the food item (integer). # - **fat**: The amount of fat in grams (numeric). # - **carb**: The amount of carbohydrates in grams (numeric). # - **fiber**: The amount of dietary fiber in grams (numeric). # - **protein**: The amount of protein in grams (numeric). 
# - **type**: The categorization of the food item, with levels bakery, bistro box, hot breakfast, parfait, petite, salad, and sandwich (factor).
# ### Additional Information
# - The data frame has a RangeIndex from 0 to 76.
# - There are no missing values (non-null count is 77 for all columns).
# - The original data frame had an additional column named "Unnamed: 0", which has been removed for this improved data dictionary.
# ---
df = pd.read_csv("/kaggle/input/starbucks-nutrition/starbucks.csv", index_col=0)
df
df.describe()  # describe is a method; without the parentheses the cell only echoes the bound method
df.info()
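# With the data dictionary above in mind, a single groupby gives a first look
# at how the nutrition columns differ across food types — a small sketch on
# the df just loaded.
df.groupby("type")[["calories", "fat", "carb", "fiber", "protein"]].mean().round(1)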
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/214/129214662.ipynb
starbucks-nutrition
utkarshx27
[{"Id": 129214662, "ScriptId": 38415291, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14543633, "CreationDate": "05/11/2023 22:43:38", "VersionNumber": 1.0, "Title": "Starbucks EDA", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 85.0, "LinesInsertedFromPrevious": 85.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185060918, "KernelVersionId": 129214662, "SourceDatasetVersionId": 5651811}]
[{"Id": 5651811, "DatasetId": 3248696, "DatasourceVersionId": 5727183, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/10/2023 05:42:59", "VersionNumber": 1.0, "Title": "Starbucks Nutrition Facts", "Slug": "starbucks-nutrition", "Subtitle": "Nutrition facts for several Starbucks food items", "Description": "```\nNutrition facts for several Starbucks food items\n```\n| Column | Description |\n| ------- | ------------------------------------------------------------ |\n| item | The name of the food item. |\n| calories| The amount of calories in the food item. |\n| fat | The quantity of fat in grams present in the food item. |\n| carb | The amount of carbohydrates in grams found in the food item. |\n| fiber | The quantity of dietary fiber in grams in the food item. |\n| protein | The amount of protein in grams contained in the food item. |\n| type | The category or type of food item (bakery, bistro box, hot breakfast, parfait, petite, salad, or sandwich). |", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3248696, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5651811.0, "CurrentDatasourceVersionId": 5727183.0, "ForumId": 3314049, "Type": 2, "CreationDate": "05/10/2023 05:42:59", "LastActivityDate": "05/10/2023", "TotalViews": 12557, "TotalDownloads": 2321, "TotalVotes": 59, "TotalKernels": 17}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from pprint import pprint

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# # Starbucks Nutritional Information
# Starbucks provides comprehensive nutritional information for their food and beverage offerings, allowing customers to make informed choices based on their dietary preferences and health goals. The nutritional information includes details such as calories, fat content, carbohydrates, fiber, and protein for each menu item.
# By making this information readily available, Starbucks aims to empower individuals to make choices that align with their nutritional needs and preferences. Whether you're looking for lower-calorie options, watching your fat or carb intake, or seeking protein-rich alternatives, the nutritional information provided by Starbucks helps you navigate their menu with confidence.
# ## Data Coverage
# The data encompasses a range of food items, from baked goods and bistro boxes to hot breakfast items, parfaits, petite treats, salads, and sandwiches. Each item is categorized based on its type, making it easier for customers to find options that suit their dietary requirements or preferences.
# ## Transparency and Informed Decisions
# By offering transparent and detailed nutritional information, Starbucks reinforces its commitment to supporting customers in making informed decisions about their food choices. Whether you're enjoying a coffee break or grabbing a quick bite, the nutritional information empowers you to enjoy Starbucks' offerings while being mindful of your nutritional goals.
# ---
# ## Data Dictionary
# The data consists of nutrition facts for several Starbucks food items. It is organized in the form of a data frame with 77 observations and 7 variables.
# ### Variables
# - **item**: The name of the food item (string).
# - **calories**: The number of calories in the food item (integer).
# - **fat**: The amount of fat in grams (numeric).
# - **carb**: The amount of carbohydrates in grams (numeric).
# - **fiber**: The amount of dietary fiber in grams (numeric).
# - **protein**: The amount of protein in grams (numeric).
# - **type**: The categorization of the food item, with levels bakery, bistro box, hot breakfast, parfait, petite, salad, and sandwich (factor).
# ### Additional Information
# - The data frame has a RangeIndex from 0 to 76.
# - There are no missing values (non-null count is 77 for all columns).
# - The original data frame had an additional column named "Unnamed: 0", which has been removed for this improved data dictionary.
# ---
df = pd.read_csv("/kaggle/input/starbucks-nutrition/starbucks.csv", index_col=0)
df
df.describe()  # describe is a method; without the parentheses the cell only echoes the bound method
df.info()
false
1
831
1
1,047
831
129263774
# # Cases and Casualties due to COVID-19 in Countries
# ## Importing Libraries
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# ## Data input and cleaning
df = pd.read_csv(
    "/kaggle/input/corona-data-cleaned/corona_virus.csv", encoding="unicode_escape"
)
df.head()
df.info()
df_dropped = df.drop(
    columns=["New Cases", "New Deaths", "New Recovered", "serious_critical"]
)
df_dropped.head()
df_dropped.info()
df_dropped[df_dropped.isnull().any(axis=1)]  # inspect the rows that still contain missing values
df = df_dropped.dropna()
df.head()
df = df.rename(columns={"Country,Other": "Country"})

# iterrows() hands out copies, so editing the row inside the loop never
# reaches the frame; strip the thousands separators column-wise and cast
# to int in one pass instead
num_cols = [
    "Total Cases",
    "Total Deaths",
    "Total Recovered",
    "Active Cases",
    "cases_per_1M",
    "deaths_per_1M",
    "Total Tests",
    "tests_per_1M",
    "Population",
]
for col in num_cols:
    df[col] = df[col].str.replace(",", "").astype(int)
df.head()
df.info()

# ## Analysis
# ### Countries with highest number of Cases
# Country vs Number of Cases
total_cases = pd.DataFrame(df, columns=["Country", "Total Cases", "Total Deaths"])
total_cases = total_cases.sort_values(by="Total Cases", ascending=False)
# total_cases.head()
# top 10 countries
top_cases = total_cases[:10]
# plot
fig1 = px.bar(top_cases, x="Country", y="Total Cases", text="Total Cases")
fig2 = px.bar(top_cases, x="Country", y="Total Deaths", text="Total Deaths")
fig1.show()
fig2.show()
# ### Scatter Plot showing Cases vs Deaths in Countries with the highest number of cases.
# total cases vs total deaths df.head() # top 10 countries top_cases = total_cases[:10] fig3 = px.scatter(top_cases, x="Total Cases", y="Total Deaths", text="Country") fig3.update_traces(textposition="top center") fig3.show() top_cases.head(10) # ### Cases and Deaths per 1M population cases_per_M = pd.DataFrame( df, columns=["Country", "cases_per_1M", "deaths_per_1M", "tests_per_1M"] ).sort_values(by="cases_per_1M", ascending=False) top_cases = cases_per_M[:10] fig4 = px.bar(top_cases, y="Country", x=["cases_per_1M"]) fig5 = px.bar(top_cases, y="Country", x=["deaths_per_1M"]) fig4.show() fig5.show() top_cases.head() # ### Countries with highest fatalities # Fatalities in world wide Population # top fatality country by ratio df["death/population %"] = round(df["Total Deaths"] / df["Population"] * 100, 2) df = df.sort_values(by=["death/population %"], ascending=False) # top 10 fatalities top_death = df[:10] fig6 = px.bar(top_death, x="Country", y="death/population %", text="death/population %") fig6.show() top_death.head()
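# The bar charts above compare only the top-10 countries; a world choropleth
# makes the full geographic spread visible. A sketch with plotly express on
# the df cleaned above — country names that plotly's gazetteer doesn't
# recognize simply render unfilled.
import plotly.express as px

fig7 = px.choropleth(
    df,
    locations="Country",
    locationmode="country names",
    color="cases_per_1M",
    color_continuous_scale="Reds",
    title="COVID-19 cases per 1M population",
)
fig7.show()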
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/263/129263774.ipynb
null
null
[{"Id": 129263774, "ScriptId": 38423448, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8975961, "CreationDate": "05/12/2023 09:27:12", "VersionNumber": 1.0, "Title": "notebook7ec6d94635", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 134.0, "LinesInsertedFromPrevious": 134.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# # Cases and Casualties due to COVID-19 in Countries
# ## Importing Libraries
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# ## Data input and cleaning
df = pd.read_csv(
    "/kaggle/input/corona-data-cleaned/corona_virus.csv", encoding="unicode_escape"
)
df.head()
df.info()
df_dropped = df.drop(
    columns=["New Cases", "New Deaths", "New Recovered", "serious_critical"]
)
df_dropped.head()
df_dropped.info()
df_dropped[df_dropped.isnull().any(axis=1)]  # inspect the rows that still contain missing values
df = df_dropped.dropna()
df.head()
df = df.rename(columns={"Country,Other": "Country"})

# iterrows() hands out copies, so editing the row inside the loop never
# reaches the frame; strip the thousands separators column-wise and cast
# to int in one pass instead
num_cols = [
    "Total Cases",
    "Total Deaths",
    "Total Recovered",
    "Active Cases",
    "cases_per_1M",
    "deaths_per_1M",
    "Total Tests",
    "tests_per_1M",
    "Population",
]
for col in num_cols:
    df[col] = df[col].str.replace(",", "").astype(int)
df.head()
df.info()

# ## Analysis
# ### Countries with highest number of Cases
# Country vs Number of Cases
total_cases = pd.DataFrame(df, columns=["Country", "Total Cases", "Total Deaths"])
total_cases = total_cases.sort_values(by="Total Cases", ascending=False)
# total_cases.head()
# top 10 countries
top_cases = total_cases[:10]
# plot
fig1 = px.bar(top_cases, x="Country", y="Total Cases", text="Total Cases")
fig2 = px.bar(top_cases, x="Country", y="Total Deaths", text="Total Deaths")
fig1.show()
fig2.show()
# ### Scatter Plot showing Cases vs Deaths in Countries with the highest number of cases.
# total cases vs total deaths df.head() # top 10 countries top_cases = total_cases[:10] fig3 = px.scatter(top_cases, x="Total Cases", y="Total Deaths", text="Country") fig3.update_traces(textposition="top center") fig3.show() top_cases.head(10) # ### Cases and Deaths per 1M population cases_per_M = pd.DataFrame( df, columns=["Country", "cases_per_1M", "deaths_per_1M", "tests_per_1M"] ).sort_values(by="cases_per_1M", ascending=False) top_cases = cases_per_M[:10] fig4 = px.bar(top_cases, y="Country", x=["cases_per_1M"]) fig5 = px.bar(top_cases, y="Country", x=["deaths_per_1M"]) fig4.show() fig5.show() top_cases.head() # ### Countries with highest fatalities # Fatalities in world wide Population # top fatality country by ratio df["death/population %"] = round(df["Total Deaths"] / df["Population"] * 100, 2) df = df.sort_values(by=["death/population %"], ascending=False) # top 10 fatalities top_death = df[:10] fig6 = px.bar(top_death, x="Country", y="death/population %", text="death/population %") fig6.show() top_death.head()
false
0
1,370
0
1,370
1,370
129319649
<jupyter_start><jupyter_text>COVID-19 Dataset
[![forthebadge](https://forthebadge.com/images/badges/made-with-python.svg)](https://forthebadge.com) [![forthebadge](https://forthebadge.com/images/badges/uses-git.svg)](https://forthebadge.com)
### Context
- A new coronavirus designated 2019-nCoV was first identified in Wuhan, the capital of China's Hubei province
- People developed pneumonia without a clear cause and for which existing vaccines or treatments were not effective.
- The virus has shown evidence of human-to-human transmission
- Transmission rate (rate of infection) appeared to escalate in mid-January 2020
- As of 30 January 2020, approximately 8,243 cases have been confirmed
### Content
> * **full_grouped.csv** - Day to day country wise no. of cases (Has County/State/Province level data)
> * **covid_19_clean_complete.csv** - Day to day country wise no. of cases (Doesn't have County/State/Province level data)
> * **country_wise_latest.csv** - Latest country level no. of cases
> * **day_wise.csv** - Day wise no. of cases (Doesn't have country level data)
> * **usa_county_wise.csv** - Day to day county level no. of cases
> * **worldometer_data.csv** - Latest data from https://www.worldometers.info/
Kaggle dataset identifier: corona-virus-report
<jupyter_script>import pandas as pd
import numpy as np
import seaborn as sns

# a raw string keeps the Windows backslashes from being read as escape sequences
df = pd.read_csv(r"D:\ml-practice\projects\covid19\country_wise_latest.csv")
df.dtypes
df.isnull().sum()
df.drop(["New cases", "New deaths", "New recovered"], axis=1, inplace=True)
df.describe()
df.head(10)
# Using correlation matrices to find the correlations between the dataset features
correlation_matrix = df.corr(method="pearson")  # methods: pearson, kendall, spearman
correlation_matrix
correlation_matrix1 = df.corr(method="kendall")
correlation_matrix1
correlation_matrix2 = df.corr(method="spearman")
correlation_matrix2

import matplotlib.pyplot as plt

# drawing heatmaps using the seaborn library, method=pearson
sns.heatmap(correlation_matrix, annot=True)
plt.title("Correlation Matrix (method=pearson)")
plt.xlabel("Covid Features")
plt.ylabel("Covid Features")
plt.show()
sns.heatmap(correlation_matrix1, annot=True)
plt.title("Correlation Matrix (method=kendall)")
plt.xlabel("features")
plt.ylabel("features")
plt.show()
sns.heatmap(correlation_matrix2, annot=True)
plt.title("Correlation Matrix (method=spearman)")
plt.show()
# # We have high correlations between these feature pairs (and vice versa):
# - Confirmed: Deaths, Recovered, Active, Confirmed last week, 1 week change
# - Deaths: Recovered, Active, Confirmed last week, 1 week change
# - Recovered: Active, Confirmed last week, 1 week change
# - Deaths / 100 cases: Deaths / 100 recovered
# - Confirmed last week: 1 week change
df2 = (
    df.groupby("WHO Region")[["Confirmed", "Deaths", "Active", "Confirmed last week"]]
    .sum()
    .reset_index()
)
df2.head()
x = df2["WHO Region"]
y = df2["Deaths"]
plt.figure(figsize=(10, 6))
plt.bar(x, y)
plt.xlabel("Regions")
plt.ylabel("Deaths")
plt.title("REGIONS VS DEATHS")
plt.show()
# 10 countries with most deaths
top_deaths = (
    df[["Country/Region", "Deaths"]]
    .sort_values(by=["Deaths"], ascending=False)
    .head(10)
)
top_deaths
sns.barplot(data=top_deaths, y="Country/Region", x="Deaths")
plt.title("TOP 10 COUNTRIES WITH HIGHEST NUMBER OF DEATHS")
plt.show()
# top ten recovered countries
top_recov = (
    df[["Country/Region", "Recovered"]]
    .sort_values(by=["Recovered"], ascending=False)
    .head(10)
)
top_recov
plt.figure(figsize=(10, 6))
sns.barplot(data=top_recov, x="Country/Region", y="Recovered") plt.title("TOP 10 RECOVERED COUNTRIES") plt.show()
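# Why keep three correlation methods? Pearson measures *linear* association,
# while Kendall and Spearman are rank-based and reward any monotone relation.
# A tiny made-up series shows the gap: y = x**3 is perfectly monotone
# (Spearman = 1.0) but not linear (Pearson < 1).
import pandas as pd

demo = pd.DataFrame({"x": range(1, 11)})
demo["y"] = demo["x"] ** 3
print("pearson :", round(demo["x"].corr(demo["y"], method="pearson"), 3))
print("spearman:", round(demo["x"].corr(demo["y"], method="spearman"), 3))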
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/319/129319649.ipynb
corona-virus-report
imdevskp
[{"Id": 129319649, "ScriptId": 38449285, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9361910, "CreationDate": "05/12/2023 18:29:43", "VersionNumber": 2.0, "Title": "Covid 19 Data Analsis", "EvaluationDate": "05/12/2023", "IsChange": false, "TotalLines": 87.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 87.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185256526, "KernelVersionId": 129319649, "SourceDatasetVersionId": 1402868}]
[{"Id": 1402868, "DatasetId": 494766, "DatasourceVersionId": 1435700, "CreatorUserId": 1302389, "LicenseName": "Other (specified in description)", "CreationDate": "08/07/2020 03:47:47", "VersionNumber": 166.0, "Title": "COVID-19 Dataset", "Slug": "corona-virus-report", "Subtitle": "Number of Confirmed, Death and Recovered cases every day across the globe", "Description": "[![forthebadge](https://forthebadge.com/images/badges/made-with-python.svg)](https://forthebadge.com) [![forthebadge](https://forthebadge.com/images/badges/uses-git.svg)](https://forthebadge.com)\n\n### Context\n\n- A new coronavirus designated 2019-nCoV was first identified in Wuhan, the capital of China's Hubei province\n- People developed pneumonia without a clear cause and for which existing vaccines or treatments were not effective. \n- The virus has shown evidence of human-to-human transmission\n- Transmission rate (rate of infection) appeared to escalate in mid-January 2020\n- As of 30 January 2020, approximately 8,243 cases have been confirmed\n\n\n### Content\n\n&gt; * **full_grouped.csv** - Day to day country wise no. of cases (Has County/State/Province level data) \n&gt; * **covid_19_clean_complete.csv** - Day to day country wise no. of cases (Doesn't have County/State/Province level data) \n&gt; * **country_wise_latest.csv** - Latest country level no. of cases \n&gt; * **day_wise.csv** - Day wise no. of cases (Doesn't have country level data) \n&gt; * **usa_county_wise.csv** - Day to day county level no. of cases \n&gt; * **worldometer_data.csv** - Latest data from https://www.worldometers.info/ \n\n\n### Acknowledgements / Data Source\n\n&gt; https://github.com/CSSEGISandData/COVID-19\n&gt; https://www.worldometers.info/\n\n### Collection methodology\n\n&gt; https://github.com/imdevskp/covid_19_jhu_data_web_scrap_and_cleaning\n\n### Cover Photo\n\n&gt; Photo from National Institutes of Allergy and Infectious Diseases\n&gt; https://www.niaid.nih.gov/news-events/novel-coronavirus-sarscov2-images\n&gt; https://blogs.cdc.gov/publichealthmatters/2019/04/h1n1/\n\n### Similar Datasets\n\n&gt; * COVID-19 - https://www.kaggle.com/imdevskp/corona-virus-report \n&gt; * MERS - https://www.kaggle.com/imdevskp/mers-outbreak-dataset-20122019\n&gt; * Ebola Western Africa 2014 Outbreak - https://www.kaggle.com/imdevskp/ebola-outbreak-20142016-complete-dataset\n&gt; * H1N1 | Swine Flu 2009 Pandemic Dataset - https://www.kaggle.com/imdevskp/h1n1-swine-flu-2009-pandemic-dataset\n&gt; * SARS 2003 Pandemic - https://www.kaggle.com/imdevskp/sars-outbreak-2003-complete-dataset\n&gt; * HIV AIDS - https://www.kaggle.com/imdevskp/hiv-aids-dataset", "VersionNotes": "update", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 494766, "CreatorUserId": 1302389, "OwnerUserId": 1302389.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1402868.0, "CurrentDatasourceVersionId": 1435700.0, "ForumId": 507860, "Type": 2, "CreationDate": "01/30/2020 14:46:58", "LastActivityDate": "01/30/2020", "TotalViews": 1009073, "TotalDownloads": 271389, "TotalVotes": 2056, "TotalKernels": 642}]
[{"Id": 1302389, "UserName": "imdevskp", "DisplayName": "Devakumar K. P.", "RegisterDate": "09/30/2017", "PerformanceTier": 3}]
import pandas as pd
import numpy as np
import seaborn as sns

# a raw string keeps the Windows backslashes from being read as escape sequences
df = pd.read_csv(r"D:\ml-practice\projects\covid19\country_wise_latest.csv")
df.dtypes
df.isnull().sum()
df.drop(["New cases", "New deaths", "New recovered"], axis=1, inplace=True)
df.describe()
df.head(10)
# Using correlation matrices to find the correlations between the dataset features
correlation_matrix = df.corr(method="pearson")  # methods: pearson, kendall, spearman
correlation_matrix
correlation_matrix1 = df.corr(method="kendall")
correlation_matrix1
correlation_matrix2 = df.corr(method="spearman")
correlation_matrix2

import matplotlib.pyplot as plt

# drawing heatmaps using the seaborn library, method=pearson
sns.heatmap(correlation_matrix, annot=True)
plt.title("Correlation Matrix (method=pearson)")
plt.xlabel("Covid Features")
plt.ylabel("Covid Features")
plt.show()
sns.heatmap(correlation_matrix1, annot=True)
plt.title("Correlation Matrix (method=kendall)")
plt.xlabel("features")
plt.ylabel("features")
plt.show()
sns.heatmap(correlation_matrix2, annot=True)
plt.title("Correlation Matrix (method=spearman)")
plt.show()
# # We have high correlations between these feature pairs (and vice versa):
# - Confirmed: Deaths, Recovered, Active, Confirmed last week, 1 week change
# - Deaths: Recovered, Active, Confirmed last week, 1 week change
# - Recovered: Active, Confirmed last week, 1 week change
# - Deaths / 100 cases: Deaths / 100 recovered
# - Confirmed last week: 1 week change
df2 = (
    df.groupby("WHO Region")[["Confirmed", "Deaths", "Active", "Confirmed last week"]]
    .sum()
    .reset_index()
)
df2.head()
x = df2["WHO Region"]
y = df2["Deaths"]
plt.figure(figsize=(10, 6))
plt.bar(x, y)
plt.xlabel("Regions")
plt.ylabel("Deaths")
plt.title("REGIONS VS DEATHS")
plt.show()
# 10 countries with most deaths
top_deaths = (
    df[["Country/Region", "Deaths"]]
    .sort_values(by=["Deaths"], ascending=False)
    .head(10)
)
top_deaths
sns.barplot(data=top_deaths, y="Country/Region", x="Deaths")
plt.title("TOP 10 COUNTRIES WITH HIGHEST NUMBER OF DEATHS")
plt.show()
# top ten recovered countries
top_recov = (
    df[["Country/Region", "Recovered"]]
    .sort_values(by=["Recovered"], ascending=False)
    .head(10)
)
top_recov
plt.figure(figsize=(10, 6))
sns.barplot(data=top_recov, x="Country/Region", y="Recovered")
plt.title("TOP 10 RECOVERED COUNTRIES")
plt.show()
false
0
821
0
1,227
821
129319325
<jupyter_start><jupyter_text>Diabetes Dataset ### Context This dataset is originally from the National Institute of Diabetes and Digestive and Kidney Diseases. The objective is to predict based on diagnostic measurements whether a patient has diabetes. ### Content Several constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage. - Pregnancies: Number of times pregnant - Glucose: Plasma glucose concentration a 2 hours in an oral glucose tolerance test - BloodPressure: Diastolic blood pressure (mm Hg) - SkinThickness: Triceps skin fold thickness (mm) - Insulin: 2-Hour serum insulin (mu U/ml) - BMI: Body mass index (weight in kg/(height in m)^2) - DiabetesPedigreeFunction: Diabetes pedigree function - Age: Age (years) - Outcome: Class variable (0 or 1) #### Sources: (a) Original owners: National Institute of Diabetes and Digestive and Kidney Diseases (b) Donor of database: Vincent Sigillito ([email protected]) Research Center, RMI Group Leader Applied Physics Laboratory The Johns Hopkins University Johns Hopkins Road Laurel, MD 20707 (301) 953-6231 (c) Date received: 9 May 1990 #### Past Usage: 1. Smith,~J.~W., Everhart,~J.~E., Dickson,~W.~C., Knowler,~W.~C., \& Johannes,~R.~S. (1988). Using the ADAP learning algorithm to forecast the onset of diabetes mellitus. In {\it Proceedings of the Symposium on Computer Applications and Medical Care} (pp. 261--265). IEEE Computer Society Press. The diagnostic, binary-valued variable investigated is whether the patient shows signs of diabetes according to World Health Organization criteria (i.e., if the 2 hour post-load plasma glucose was at least 200 mg/dl at any survey examination or if found during routine medical care). The population lives near Phoenix, Arizona, USA. Results: Their ADAP algorithm makes a real-valued prediction between 0 and 1. This was transformed into a binary decision using a cutoff of 0.448. Using 576 training instances, the sensitivity and specificity of their algorithm was 76% on the remaining 192 instances. #### Relevant Information: Several constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage. ADAP is an adaptive learning routine that generates and executes digital analogs of perceptron-like devices. It is a unique algorithm; see the paper for details. #### Number of Instances: 768 #### Number of Attributes: 8 plus class #### For Each Attribute: (all numeric-valued) 1. Number of times pregnant 2. Plasma glucose concentration a 2 hours in an oral glucose tolerance test 3. Diastolic blood pressure (mm Hg) 4. Triceps skin fold thickness (mm) 5. 2-Hour serum insulin (mu U/ml) 6. Body mass index (weight in kg/(height in m)^2) 7. Diabetes pedigree function 8. Age (years) 9. 
Class variable (0 or 1)
#### Missing Attribute Values: Yes
#### Class Distribution: (class value 1 is interpreted as "tested positive for diabetes")
Kaggle dataset identifier: diabetes-data-set
<jupyter_code>import pandas as pd
df = pd.read_csv('diabetes-data-set/diabetes.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 768 entries, 0 to 767
Data columns (total 9 columns):
 #   Column                    Non-Null Count  Dtype
---  ------                    --------------  -----
 0   Pregnancies               768 non-null    int64
 1   Glucose                   768 non-null    int64
 2   BloodPressure             768 non-null    int64
 3   SkinThickness             768 non-null    int64
 4   Insulin                   768 non-null    int64
 5   BMI                       768 non-null    float64
 6   DiabetesPedigreeFunction  768 non-null    float64
 7   Age                       768 non-null    int64
 8   Outcome                   768 non-null    int64
dtypes: float64(2), int64(7)
memory usage: 54.1 KB
<jupyter_text>Examples:
{"Pregnancies": 6.0, "Glucose": 148.0, "BloodPressure": 72.0, "SkinThickness": 35.0, "Insulin": 0.0, "BMI": 33.6, "DiabetesPedigreeFunction": 0.627, "Age": 50.0, "Outcome": 1.0}
{"Pregnancies": 1.0, "Glucose": 85.0, "BloodPressure": 66.0, "SkinThickness": 29.0, "Insulin": 0.0, "BMI": 26.6, "DiabetesPedigreeFunction": 0.351, "Age": 31.0, "Outcome": 0.0}
{"Pregnancies": 8.0, "Glucose": 183.0, "BloodPressure": 64.0, "SkinThickness": 0.0, "Insulin": 0.0, "BMI": 23.3, "DiabetesPedigreeFunction": 0.672, "Age": 32.0, "Outcome": 1.0}
{"Pregnancies": 1.0, "Glucose": 89.0, "BloodPressure": 66.0, "SkinThickness": 23.0, "Insulin": 94.0, "BMI": 28.1, "DiabetesPedigreeFunction": 0.167, "Age": 21.0, "Outcome": 0.0}
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

diabetes = pd.read_csv("/kaggle/input/diabetes-data-set/diabetes.csv")
diabetes

import statistics

np.mean(diabetes["BloodPressure"])
statistics.mode(diabetes["BloodPressure"])

import matplotlib.pyplot as plt

plt.bar(diabetes.index, diabetes["BloodPressure"])
np.median(diabetes["BloodPressure"])
plt.bar(diabetes.index, diabetes["Insulin"])
np.median(diabetes["Insulin"])
np.mean(diabetes["Insulin"])
statistics.mode(diabetes["Insulin"])

# Count the diabetic and non-diabetic patients whose insulin lies in the
# 16-166 range. The comparisons need their own parentheses and an & (a range
# is an AND, not an OR), and .count must be called — .shape[0] gives the row
# count directly
in_range = (diabetes["Insulin"] > 16) & (diabetes["Insulin"] < 166)
diabetes[in_range & (diabetes["Outcome"] == 1)].shape[0]
diabetes[in_range & (diabetes["Outcome"] == 0)].shape[0]

plt.bar(diabetes.index, diabetes["Age"])
np.mean(diabetes["Age"])
np.median(diabetes["Age"])
statistics.mode(diabetes["Age"])
diabetes[diabetes["Age"] == 22].shape[0]
diabetes["Age"].value_counts()
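# The cells above compute mean/median/mode one call at a time; pandas can do
# all three per column in one pass. A sketch on the columns inspected above —
# note mode() can return several values, so we keep the first.
summary = diabetes[["BloodPressure", "Insulin", "Age"]].agg(
    ["mean", "median", lambda s: s.mode().iloc[0]]
)
summary.index = ["mean", "median", "mode"]
print(summary)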
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/319/129319325.ipynb
diabetes-data-set
mathchi
[{"Id": 129319325, "ScriptId": 38446686, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5750785, "CreationDate": "05/12/2023 18:25:29", "VersionNumber": 1.0, "Title": "statistical-analysis", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 62.0, "LinesInsertedFromPrevious": 62.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185255804, "KernelVersionId": 129319325, "SourceDatasetVersionId": 1400440}]
[{"Id": 1400440, "DatasetId": 818300, "DatasourceVersionId": 1433199, "CreatorUserId": 3650837, "LicenseName": "CC0: Public Domain", "CreationDate": "08/05/2020 21:27:01", "VersionNumber": 1.0, "Title": "Diabetes Dataset", "Slug": "diabetes-data-set", "Subtitle": "This dataset is originally from the N. Inst. of Diabetes & Diges. & Kidney Dis.", "Description": "### Context\n\nThis dataset is originally from the National Institute of Diabetes and Digestive and Kidney Diseases. The objective is to predict based on diagnostic measurements whether a patient has diabetes.\n\n\n### Content\n\nSeveral constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage.\n\n- Pregnancies: Number of times pregnant \n- Glucose: Plasma glucose concentration a 2 hours in an oral glucose tolerance test \n- BloodPressure: Diastolic blood pressure (mm Hg) \n- SkinThickness: Triceps skin fold thickness (mm) \n- Insulin: 2-Hour serum insulin (mu U/ml) \n- BMI: Body mass index (weight in kg/(height in m)^2) \n- DiabetesPedigreeFunction: Diabetes pedigree function \n- Age: Age (years) \n- Outcome: Class variable (0 or 1)\n\n#### Sources:\n (a) Original owners: National Institute of Diabetes and Digestive and\n Kidney Diseases\n (b) Donor of database: Vincent Sigillito ([email protected])\n Research Center, RMI Group Leader\n Applied Physics Laboratory\n The Johns Hopkins University\n Johns Hopkins Road\n Laurel, MD 20707\n (301) 953-6231\n (c) Date received: 9 May 1990\n\n#### Past Usage:\n 1. Smith,~J.~W., Everhart,~J.~E., Dickson,~W.~C., Knowler,~W.~C., \\&\n Johannes,~R.~S. (1988). Using the ADAP learning algorithm to forecast\n the onset of diabetes mellitus. In {\\it Proceedings of the Symposium\n on Computer Applications and Medical Care} (pp. 261--265). IEEE\n Computer Society Press.\n\n The diagnostic, binary-valued variable investigated is whether the\n patient shows signs of diabetes according to World Health Organization\n criteria (i.e., if the 2 hour post-load plasma glucose was at least \n 200 mg/dl at any survey examination or if found during routine medical\n care). The population lives near Phoenix, Arizona, USA.\n\n Results: Their ADAP algorithm makes a real-valued prediction between\n 0 and 1. This was transformed into a binary decision using a cutoff of \n 0.448. Using 576 training instances, the sensitivity and specificity\n of their algorithm was 76% on the remaining 192 instances.\n\n#### Relevant Information:\n Several constraints were placed on the selection of these instances from\n a larger database. In particular, all patients here are females at\n least 21 years old of Pima Indian heritage. ADAP is an adaptive learning\n routine that generates and executes digital analogs of perceptron-like\n devices. It is a unique algorithm; see the paper for details.\n\n#### Number of Instances: 768\n\n#### Number of Attributes: 8 plus class \n\n#### For Each Attribute: (all numeric-valued)\n 1. Number of times pregnant\n 2. Plasma glucose concentration a 2 hours in an oral glucose tolerance test\n 3. Diastolic blood pressure (mm Hg)\n 4. Triceps skin fold thickness (mm)\n 5. 2-Hour serum insulin (mu U/ml)\n 6. Body mass index (weight in kg/(height in m)^2)\n 7. Diabetes pedigree function\n 8. Age (years)\n 9. 
Class variable (0 or 1)\n\n#### Missing Attribute Values: Yes\n\n#### Class Distribution: (class value 1 is interpreted as \"tested positive for\n diabetes\")", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 818300, "CreatorUserId": 3650837, "OwnerUserId": 3650837.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1400440.0, "CurrentDatasourceVersionId": 1433199.0, "ForumId": 833406, "Type": 2, "CreationDate": "08/05/2020 21:27:01", "LastActivityDate": "08/05/2020", "TotalViews": 440450, "TotalDownloads": 65613, "TotalVotes": 496, "TotalKernels": 245}]
[{"Id": 3650837, "UserName": "mathchi", "DisplayName": "Mehmet Akturk", "RegisterDate": "09/01/2019", "PerformanceTier": 3}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session diabetes = pd.read_csv("/kaggle/input/diabetes-data-set/diabetes.csv") diabetes import statistics np.mean(diabetes["BloodPressure"]) statistics.mode(diabetes["BloodPressure"]) import matplotlib.pyplot as plt plt.bar(diabetes.index, diabetes["BloodPressure"]) np.median(diabetes["BloodPressure"]) plt.bar(diabetes.index, diabetes["Insulin"]) np.median(diabetes["Insulin"]) np.mean(diabetes["Insulin"]) statistics.mode(diabetes["Insulin"]) diabetes[ (diabetes["Insulin"] > 16) & (diabetes["Insulin"] < 166) & (diabetes["Outcome"] == 1) ].count() diabetes[ (diabetes["Insulin"] > 16) & (diabetes["Insulin"] < 166) & (diabetes["Outcome"] == 0) ].count() plt.bar(diabetes.index, diabetes["Age"]) np.mean(diabetes["Age"]) np.median(diabetes["Age"]) statistics.mode(diabetes["Age"]) diabetes[diabetes["Age"] == 22].count() diabetes["Age"].value_counts()
[{"diabetes-data-set/diabetes.csv": {"column_names": "[\"Pregnancies\", \"Glucose\", \"BloodPressure\", \"SkinThickness\", \"Insulin\", \"BMI\", \"DiabetesPedigreeFunction\", \"Age\", \"Outcome\"]", "column_data_types": "{\"Pregnancies\": \"int64\", \"Glucose\": \"int64\", \"BloodPressure\": \"int64\", \"SkinThickness\": \"int64\", \"Insulin\": \"int64\", \"BMI\": \"float64\", \"DiabetesPedigreeFunction\": \"float64\", \"Age\": \"int64\", \"Outcome\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 768 entries, 0 to 767\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Pregnancies 768 non-null int64 \n 1 Glucose 768 non-null int64 \n 2 BloodPressure 768 non-null int64 \n 3 SkinThickness 768 non-null int64 \n 4 Insulin 768 non-null int64 \n 5 BMI 768 non-null float64\n 6 DiabetesPedigreeFunction 768 non-null float64\n 7 Age 768 non-null int64 \n 8 Outcome 768 non-null int64 \ndtypes: float64(2), int64(7)\nmemory usage: 54.1 KB\n", "summary": "{\"Pregnancies\": {\"count\": 768.0, \"mean\": 3.8450520833333335, \"std\": 3.3695780626988694, \"min\": 0.0, \"25%\": 1.0, \"50%\": 3.0, \"75%\": 6.0, \"max\": 17.0}, \"Glucose\": {\"count\": 768.0, \"mean\": 120.89453125, \"std\": 31.97261819513622, \"min\": 0.0, \"25%\": 99.0, \"50%\": 117.0, \"75%\": 140.25, \"max\": 199.0}, \"BloodPressure\": {\"count\": 768.0, \"mean\": 69.10546875, \"std\": 19.355807170644777, \"min\": 0.0, \"25%\": 62.0, \"50%\": 72.0, \"75%\": 80.0, \"max\": 122.0}, \"SkinThickness\": {\"count\": 768.0, \"mean\": 20.536458333333332, \"std\": 15.952217567727637, \"min\": 0.0, \"25%\": 0.0, \"50%\": 23.0, \"75%\": 32.0, \"max\": 99.0}, \"Insulin\": {\"count\": 768.0, \"mean\": 79.79947916666667, \"std\": 115.24400235133817, \"min\": 0.0, \"25%\": 0.0, \"50%\": 30.5, \"75%\": 127.25, \"max\": 846.0}, \"BMI\": {\"count\": 768.0, \"mean\": 31.992578124999998, \"std\": 7.884160320375446, \"min\": 0.0, \"25%\": 27.3, \"50%\": 32.0, \"75%\": 36.6, \"max\": 67.1}, \"DiabetesPedigreeFunction\": {\"count\": 768.0, \"mean\": 0.47187630208333325, \"std\": 0.3313285950127749, \"min\": 0.078, \"25%\": 0.24375, \"50%\": 0.3725, \"75%\": 0.62625, \"max\": 2.42}, \"Age\": {\"count\": 768.0, \"mean\": 33.240885416666664, \"std\": 11.760231540678685, \"min\": 21.0, \"25%\": 24.0, \"50%\": 29.0, \"75%\": 41.0, \"max\": 81.0}, \"Outcome\": {\"count\": 768.0, \"mean\": 0.3489583333333333, \"std\": 0.47695137724279896, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": "{\"Pregnancies\":{\"0\":6,\"1\":1,\"2\":8,\"3\":1},\"Glucose\":{\"0\":148,\"1\":85,\"2\":183,\"3\":89},\"BloodPressure\":{\"0\":72,\"1\":66,\"2\":64,\"3\":66},\"SkinThickness\":{\"0\":35,\"1\":29,\"2\":0,\"3\":23},\"Insulin\":{\"0\":0,\"1\":0,\"2\":0,\"3\":94},\"BMI\":{\"0\":33.6,\"1\":26.6,\"2\":23.3,\"3\":28.1},\"DiabetesPedigreeFunction\":{\"0\":0.627,\"1\":0.351,\"2\":0.672,\"3\":0.167},\"Age\":{\"0\":50,\"1\":31,\"2\":32,\"3\":21},\"Outcome\":{\"0\":1,\"1\":0,\"2\":1,\"3\":0}}"}}]
true
1
<start_data_description><data_path>diabetes-data-set/diabetes.csv: <column_names> ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age', 'Outcome'] <column_types> {'Pregnancies': 'int64', 'Glucose': 'int64', 'BloodPressure': 'int64', 'SkinThickness': 'int64', 'Insulin': 'int64', 'BMI': 'float64', 'DiabetesPedigreeFunction': 'float64', 'Age': 'int64', 'Outcome': 'int64'} <dataframe_Summary> {'Pregnancies': {'count': 768.0, 'mean': 3.8450520833333335, 'std': 3.3695780626988694, 'min': 0.0, '25%': 1.0, '50%': 3.0, '75%': 6.0, 'max': 17.0}, 'Glucose': {'count': 768.0, 'mean': 120.89453125, 'std': 31.97261819513622, 'min': 0.0, '25%': 99.0, '50%': 117.0, '75%': 140.25, 'max': 199.0}, 'BloodPressure': {'count': 768.0, 'mean': 69.10546875, 'std': 19.355807170644777, 'min': 0.0, '25%': 62.0, '50%': 72.0, '75%': 80.0, 'max': 122.0}, 'SkinThickness': {'count': 768.0, 'mean': 20.536458333333332, 'std': 15.952217567727637, 'min': 0.0, '25%': 0.0, '50%': 23.0, '75%': 32.0, 'max': 99.0}, 'Insulin': {'count': 768.0, 'mean': 79.79947916666667, 'std': 115.24400235133817, 'min': 0.0, '25%': 0.0, '50%': 30.5, '75%': 127.25, 'max': 846.0}, 'BMI': {'count': 768.0, 'mean': 31.992578124999998, 'std': 7.884160320375446, 'min': 0.0, '25%': 27.3, '50%': 32.0, '75%': 36.6, 'max': 67.1}, 'DiabetesPedigreeFunction': {'count': 768.0, 'mean': 0.47187630208333325, 'std': 0.3313285950127749, 'min': 0.078, '25%': 0.24375, '50%': 0.3725, '75%': 0.62625, 'max': 2.42}, 'Age': {'count': 768.0, 'mean': 33.240885416666664, 'std': 11.760231540678685, 'min': 21.0, '25%': 24.0, '50%': 29.0, '75%': 41.0, 'max': 81.0}, 'Outcome': {'count': 768.0, 'mean': 0.3489583333333333, 'std': 0.47695137724279896, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}} <dataframe_info> RangeIndex: 768 entries, 0 to 767 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Pregnancies 768 non-null int64 1 Glucose 768 non-null int64 2 BloodPressure 768 non-null int64 3 SkinThickness 768 non-null int64 4 Insulin 768 non-null int64 5 BMI 768 non-null float64 6 DiabetesPedigreeFunction 768 non-null float64 7 Age 768 non-null int64 8 Outcome 768 non-null int64 dtypes: float64(2), int64(7) memory usage: 54.1 KB <some_examples> {'Pregnancies': {'0': 6, '1': 1, '2': 8, '3': 1}, 'Glucose': {'0': 148, '1': 85, '2': 183, '3': 89}, 'BloodPressure': {'0': 72, '1': 66, '2': 64, '3': 66}, 'SkinThickness': {'0': 35, '1': 29, '2': 0, '3': 23}, 'Insulin': {'0': 0, '1': 0, '2': 0, '3': 94}, 'BMI': {'0': 33.6, '1': 26.6, '2': 23.3, '3': 28.1}, 'DiabetesPedigreeFunction': {'0': 0.627, '1': 0.351, '2': 0.672, '3': 0.167}, 'Age': {'0': 50, '1': 31, '2': 32, '3': 21}, 'Outcome': {'0': 1, '1': 0, '2': 1, '3': 0}} <end_description>
508
1
2,215
508
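Editor's note: the insulin-range counts in the script above are brittle even after the operator-precedence fix; a minimal sketch of the same query using pandas' `Series.between` (assumes pandas >= 1.3 for the string `inclusive` argument):

import pandas as pd

diabetes = pd.read_csv("/kaggle/input/diabetes-data-set/diabetes.csv")

# Insulin strictly between 16 and 166 mu U/ml
in_range = diabetes["Insulin"].between(16, 166, inclusive="neither")

# Outcome counts (1 = tested positive) inside that range
print(diabetes.loc[in_range, "Outcome"].value_counts())

This replaces the chained boolean comparisons with a single readable condition and counts both classes in one call.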
129319372
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score from sklearn.naive_bayes import MultinomialNB from sklearn.metrics import confusion_matrix, classification_report, roc_curve, auc import matplotlib.pyplot as plt import numpy as np # Load the data df = pd.read_csv( "/kaggle/input/amazon-cells-labelledtxt/amazon_cells_labelled.txt", sep="\t", header=None, ) # Split the data into features (X) and target (y) X = df[0] y = df[1] # Convert text data into TF-IDF vectorizer = TfidfVectorizer() X = vectorizer.fit_transform(X) # Naive Bayes classifier # Split the data into training and test sets X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # Define the model naive_bayes = MultinomialNB() # Define the grid search parameters parameters = {"alpha": [0.1, 0.5, 1.0, 1.5, 2.0]} # Conduct grid search grid_search = GridSearchCV(estimator=naive_bayes, param_grid=parameters, cv=10) grid_search.fit(X_train, y_train) # Print the best score and parameters print("Best Score: ", grid_search.best_score_) print("Best Params: ", grid_search.best_params_) # Apply the best parameters to the model naive_bayes = MultinomialNB(alpha=grid_search.best_params_["alpha"]) naive_bayes.fit(X_train, y_train) # Predict the test set y_pred = naive_bayes.predict(X_test) # Print the confusion matrix, classification report print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) # Compute ROC curve and ROC area fpr, tpr, _ = roc_curve(y_test, y_pred) roc_auc = auc(fpr, tpr) # Plot ROC curve plt.figure() plt.plot(fpr, tpr, color="darkorange", label="ROC curve (area = %0.2f)" % roc_auc) plt.plot([0, 1], [0, 1], color="navy", linestyle="--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver Operating Characteristic") plt.legend(loc="lower right") plt.show() # Logistic Regression Classifier from sklearn.linear_model import LogisticRegression # Define the model logistic_regression = LogisticRegression() # Define the grid search parameters parameters = {"C": [0.1, 0.5, 1.0, 1.5, 2.0]} # Conduct grid search grid_search = GridSearchCV(estimator=logistic_regression, param_grid=parameters, cv=10) grid_search.fit(X_train, y_train) # Print the best score and parameters print("Best Score: ", grid_search.best_score_) print("Best Params: ", grid_search.best_params_) # Apply the best parameters to the model logistic_regression = LogisticRegression(C=grid_search.best_params_["C"]) logistic_regression.fit(X_train, y_train) # Predict the test set y_pred = logistic_regression.predict(X_test) # Print the confusion matrix, classification report print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test,
y_pred)) # Compute ROC curve and ROC area fpr, tpr, _ = roc_curve(y_test, y_pred) roc_auc = auc(fpr, tpr) # Plot ROC curve plt.figure() plt.plot(fpr, tpr, color="darkorange", label="ROC curve (area = %0.2f)" % roc_auc) plt.plot([0, 1], [0, 1], color="navy", linestyle="--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver Operating Characteristic") plt.legend(loc="lower right") plt.show() # Decision Tree Classifier from sklearn.tree import DecisionTreeClassifier # Define the model dt = DecisionTreeClassifier() # Define the grid search parameters parameters = { "max_depth": [None, 10, 20, 30, 50], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 4], } # Conduct grid search grid_search = GridSearchCV(estimator=dt, param_grid=parameters, cv=10) grid_search.fit(X_train, y_train) # Print the best score and parameters print("Best Score: ", grid_search.best_score_) print("Best Params: ", grid_search.best_params_) # Apply the best parameters to the model dt = DecisionTreeClassifier( max_depth=grid_search.best_params_["max_depth"], min_samples_split=grid_search.best_params_["min_samples_split"], min_samples_leaf=grid_search.best_params_["min_samples_leaf"], ) dt.fit(X_train, y_train) # Predict the test set y_pred = dt.predict(X_test) # Print the confusion matrix, classification report print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) # Compute ROC curve and ROC area fpr, tpr, _ = roc_curve(y_test, y_pred) roc_auc = auc(fpr, tpr) # Plot ROC curve plt.figure() plt.plot(fpr, tpr, color="darkorange", label="ROC curve (area = %0.2f)" % roc_auc) plt.plot([0, 1], [0, 1], color="navy", linestyle="--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver Operating Characteristic") plt.legend(loc="lower right") plt.show() # KNN Classifier from sklearn.neighbors import KNeighborsClassifier # Define the model knn = KNeighborsClassifier() # Define the grid search parameters parameters = {"n_neighbors": [3, 5, 7, 9, 11]} # Conduct grid search grid_search = GridSearchCV(estimator=knn, param_grid=parameters, cv=10) grid_search.fit(X_train, y_train) # Print the best score and parameters print("Best Score: ", grid_search.best_score_) print("Best Params: ", grid_search.best_params_) # Apply the best parameters to the model knn = KNeighborsClassifier(n_neighbors=grid_search.best_params_["n_neighbors"]) knn.fit(X_train, y_train) # Predict the test set y_pred = knn.predict(X_test) # Print the confusion matrix, classification report print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) # Compute ROC curve and ROC area fpr, tpr, _ = roc_curve(y_test, y_pred) roc_auc = auc(fpr, tpr) # Plot ROC curve plt.figure() plt.plot(fpr, tpr, color="darkorange", label="ROC curve (area = %0.2f)" % roc_auc) plt.plot([0, 1], [0, 1], color="navy", linestyle="--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver Operating Characteristic") plt.legend(loc="lower right") plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/319/129319372.ipynb
null
null
[{"Id": 129319372, "ScriptId": 38445621, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3637157, "CreationDate": "05/12/2023 18:26:01", "VersionNumber": 1.0, "Title": "Assignment2_Nihad&Yusif", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 241.0, "LinesInsertedFromPrevious": 241.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score from sklearn.naive_bayes import MultinomialNB from sklearn.metrics import confusion_matrix, classification_report, roc_curve, auc import matplotlib.pyplot as plt import numpy as np # Load the data df = pd.read_csv( "/kaggle/input/amazon-cells-labelledtxt/amazon_cells_labelled.txt", sep="\t", header=None, ) # Split the data into features (X) and target (y) X = df[0] y = df[1] # Convert text data into TF-IDF vectorizer = TfidfVectorizer() X = vectorizer.fit_transform(X) # Naive Bayes classifier # Split the data into training and test sets X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # Define the model naive_bayes = MultinomialNB() # Define the grid search parameters parameters = {"alpha": [0.1, 0.5, 1.0, 1.5, 2.0]} # Conduct grid search grid_search = GridSearchCV(estimator=naive_bayes, param_grid=parameters, cv=10) grid_search.fit(X_train, y_train) # Print the best score and parameters print("Best Score: ", grid_search.best_score_) print("Best Params: ", grid_search.best_params_) # Apply the best parameters to the model naive_bayes = MultinomialNB(alpha=grid_search.best_params_["alpha"]) naive_bayes.fit(X_train, y_train) # Predict the test set y_pred = naive_bayes.predict(X_test) # Print the confusion matrix, classification report print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) # Compute ROC curve and ROC area fpr, tpr, _ = roc_curve(y_test, y_pred) roc_auc = auc(fpr, tpr) # Plot ROC curve plt.figure() plt.plot(fpr, tpr, color="darkorange", label="ROC curve (area = %0.2f)" % roc_auc) plt.plot([0, 1], [0, 1], color="navy", linestyle="--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver Operating Characteristic") plt.legend(loc="lower right") plt.show() # Logistic Regression Classifier from sklearn.linear_model import LogisticRegression # Define the model logistic_regression = LogisticRegression() # Define the grid search parameters parameters = {"C": [0.1, 0.5, 1.0, 1.5, 2.0]} # Conduct grid search grid_search = GridSearchCV(estimator=logistic_regression, param_grid=parameters, cv=10) grid_search.fit(X_train, y_train) # Print the best score and parameters print("Best Score: ", grid_search.best_score_) print("Best Params: ", grid_search.best_params_) # Apply the best parameters to the model logistic_regression = LogisticRegression(C=grid_search.best_params_["C"]) logistic_regression.fit(X_train, y_train) # Predict the test set y_pred = logistic_regression.predict(X_test) # Print the confusion matrix, classification report print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test,
y_pred)) # Compute ROC curve and ROC area fpr, tpr, _ = roc_curve(y_test, y_pred) roc_auc = auc(fpr, tpr) # Plot ROC curve plt.figure() plt.plot(fpr, tpr, color="darkorange", label="ROC curve (area = %0.2f)" % roc_auc) plt.plot([0, 1], [0, 1], color="navy", linestyle="--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver Operating Characteristic") plt.legend(loc="lower right") plt.show() # Decision Tree Classifier from sklearn.tree import DecisionTreeClassifier # Define the model dt = DecisionTreeClassifier() # Define the grid search parameters parameters = { "max_depth": [None, 10, 20, 30, 50], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 4], } # Conduct grid search grid_search = GridSearchCV(estimator=dt, param_grid=parameters, cv=10) grid_search.fit(X_train, y_train) # Print the best score and parameters print("Best Score: ", grid_search.best_score_) print("Best Params: ", grid_search.best_params_) # Apply the best parameters to the model dt = DecisionTreeClassifier( max_depth=grid_search.best_params_["max_depth"], min_samples_split=grid_search.best_params_["min_samples_split"], min_samples_leaf=grid_search.best_params_["min_samples_leaf"], ) dt.fit(X_train, y_train) # Predict the test set y_pred = dt.predict(X_test) # Print the confusion matrix, classification report print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) # Compute ROC curve and ROC area fpr, tpr, _ = roc_curve(y_test, y_pred) roc_auc = auc(fpr, tpr) # Plot ROC curve plt.figure() plt.plot(fpr, tpr, color="darkorange", label="ROC curve (area = %0.2f)" % roc_auc) plt.plot([0, 1], [0, 1], color="navy", linestyle="--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver Operating Characteristic") plt.legend(loc="lower right") plt.show() # KNN Classifier from sklearn.neighbors import KNeighborsClassifier # Define the model knn = KNeighborsClassifier() # Define the grid search parameters parameters = {"n_neighbors": [3, 5, 7, 9, 11]} # Conduct grid search grid_search = GridSearchCV(estimator=knn, param_grid=parameters, cv=10) grid_search.fit(X_train, y_train) # Print the best score and parameters print("Best Score: ", grid_search.best_score_) print("Best Params: ", grid_search.best_params_) # Apply the best parameters to the model knn = KNeighborsClassifier(n_neighbors=grid_search.best_params_["n_neighbors"]) knn.fit(X_train, y_train) # Predict the test set y_pred = knn.predict(X_test) # Print the confusion matrix, classification report print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) # Compute ROC curve and ROC area fpr, tpr, _ = roc_curve(y_test, y_pred) roc_auc = auc(fpr, tpr) # Plot ROC curve plt.figure() plt.plot(fpr, tpr, color="darkorange", label="ROC curve (area = %0.2f)" % roc_auc) plt.plot([0, 1], [0, 1], color="navy", linestyle="--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver Operating Characteristic") plt.legend(loc="lower right") plt.show()
false
0
2,216
0
2,216
2,216
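Editor's note: the four classifier cells above repeat the same grid-search, report, and ROC steps verbatim; a hedged sketch of one reusable helper (the `evaluate_model` name and structure are mine, not from the notebook):

from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc
import matplotlib.pyplot as plt

def evaluate_model(estimator, param_grid, X_train, X_test, y_train, y_test):
    # 10-fold grid search, mirroring the notebook's settings
    search = GridSearchCV(estimator=estimator, param_grid=param_grid, cv=10)
    search.fit(X_train, y_train)
    print("Best Score: ", search.best_score_)
    print("Best Params: ", search.best_params_)
    # GridSearchCV refits the best estimator, so there is no need to rebuild it
    y_pred = search.best_estimator_.predict(X_test)
    print(confusion_matrix(y_test, y_pred))
    print(classification_report(y_test, y_pred))
    # ROC from hard 0/1 predictions, as in the original cells;
    # predict_proba scores would give a smoother, more informative curve
    fpr, tpr, _ = roc_curve(y_test, y_pred)
    plt.plot(fpr, tpr, color="darkorange", label="ROC curve (area = %0.2f)" % auc(fpr, tpr))
    plt.plot([0, 1], [0, 1], color="navy", linestyle="--")
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.legend(loc="lower right")
    plt.show()
    return search.best_estimator_

Usage, e.g.: evaluate_model(MultinomialNB(), {"alpha": [0.1, 0.5, 1.0, 1.5, 2.0]}, X_train, X_test, y_train, y_test).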
129319188
# hide from fastbook import * from fastai.vision.widgets import * setup_book() plant_types = "poison ivy", "green" path = Path("plants") if not path.exists(): path.mkdir() for o in plant_types: dest = path / o dest.mkdir(exist_ok=True) results = search_images_ddg(f"{o} plant") download_images(dest, urls=results) fns = get_image_files(path) fns failed = verify_images(fns) failed failed.map(Path.unlink) plants = DataBlock( blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128), ) plants = plants.new( item_tfms=RandomResizedCrop(224, min_scale=0.5), batch_tfms=aug_transforms() ) dls = plants.dataloaders(path) learn = vision_learner(dls, resnet18, metrics=error_rate) learn.fine_tune(4) interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix() interp.plot_top_losses(5, nrows=1) cleaner = ImageClassifierCleaner(learn) cleaner learn.export() path = Path() path.ls(file_exts=".pkl") Path("images").mkdir(exist_ok=True) ims = [ "https://bgr.com/wp-content/uploads/2020/08/AdobeStock_155258329-Recovered-1.jpg?quality=70&strip=all" ] dest = "images/poisonivy.jpg" download_url(ims[0], dest) learn.predict("images/poisonivy.jpg") ims = [ "https://www.thespruce.com/thmb/3JCPAUHY6gHDg02aFaBfM1qKHBo=/4437x2958/filters:no_upscale():max_bytes(150000):strip_icc()/close-up-of-green-hellebore-flowers-562408117-5a942e5604d1cf0036b01143.jpg" ] dest = "images/greenplant.jpg" download_url(ims[0], dest) learn.predict("images/greenplant.jpg") learn = load_learner("export.pkl") labels = learn.dls.vocab def predict(img): img = PILImage.create(img) pred, pred_idx, probs = learn.predict(img) return {labels[i]: float(probs[i]) for i in range(len(labels))}
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/319/129319188.ipynb
null
null
[{"Id": 129319188, "ScriptId": 38419326, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14693253, "CreationDate": "05/12/2023 18:23:43", "VersionNumber": 1.0, "Title": "Is Poison Ivy", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 72.0, "LinesInsertedFromPrevious": 72.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# hide from fastbook import * from fastai.vision.widgets import * setup_book() plant_types = "poison ivy", "green" path = Path("plants") if not path.exists(): path.mkdir() for o in plant_types: dest = path / o dest.mkdir(exist_ok=True) results = search_images_ddg(f"{o} plant") download_images(dest, urls=results) fns = get_image_files(path) fns failed = verify_images(fns) failed failed.map(Path.unlink) plants = DataBlock( blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128), ) plants = plants.new( item_tfms=RandomResizedCrop(224, min_scale=0.5), batch_tfms=aug_transforms() ) dls = plants.dataloaders(path) learn = vision_learner(dls, resnet18, metrics=error_rate) learn.fine_tune(4) interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix() interp.plot_top_losses(5, nrows=1) cleaner = ImageClassifierCleaner(learn) cleaner learn.export() path = Path() path.ls(file_exts=".pkl") Path("images").mkdir(exist_ok=True) ims = [ "https://bgr.com/wp-content/uploads/2020/08/AdobeStock_155258329-Recovered-1.jpg?quality=70&strip=all" ] dest = "images/poisonivy.jpg" download_url(ims[0], dest) learn.predict("images/poisonivy.jpg") ims = [ "https://www.thespruce.com/thmb/3JCPAUHY6gHDg02aFaBfM1qKHBo=/4437x2958/filters:no_upscale():max_bytes(150000):strip_icc()/close-up-of-green-hellebore-flowers-562408117-5a942e5604d1cf0036b01143.jpg" ] dest = "images/greenplant.jpg" download_url(ims[0], dest) learn.predict("images/greenplant.jpg") learn = load_learner("export.pkl") labels = learn.dls.vocab def predict(img): img = PILImage.create(img) pred, pred_idx, probs = learn.predict(img) return {labels[i]: float(probs[i]) for i in range(len(labels))}
false
0
705
0
705
705
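Editor's note: the final `predict` function returns a {label: probability} dict, which is exactly the format a Gradio `Label` output expects; a hedged sketch of serving the exported model that way (Gradio is my addition, not in the notebook):

import gradio as gr  # assumption: gradio is installed; the notebook never imports it

# Reuses the `predict` function and exported learner from the cells above
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="filepath"),  # PILImage.create accepts a file path
    outputs=gr.Label(num_top_classes=2),
)
demo.launch()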
129319337
<jupyter_start><jupyter_text>Pakistan Data Talent This comprehensive dataset features a collection of LinkedIn profiles belonging to talented data scientists hailing from Pakistan. It presents a valuable resource for researchers, recruiters, and data enthusiasts seeking insights into the diverse and growing field of data science within the Pakistani professional landscape. The dataset includes the following key information for each profile: URL, full name, headline, and location. The profile URLs provide direct access to each individual's LinkedIn page, allowing users to explore their professional background, experiences, and expertise in more detail. Whether you are a recruiter looking to identify potential candidates, a researcher investigating trends and skills in the Pakistani data science community, or simply an enthusiast curious about the professionals driving data-driven innovation in Pakistan, this dataset will prove invaluable. By making this dataset available on Kaggle, we aim to foster collaboration, knowledge sharing, and networking opportunities within the Pakistani data science community. We encourage users to leverage this dataset for various analytical and research purposes, such as demographic analysis, skillset mapping, or creating tailored outreach strategies. Note: The dataset contains publicly available information from LinkedIn profiles. We kindly request that users respect privacy and professional boundaries when utilizing this dataset, refraining from any unauthorized use or misuse of the provided information. Start exploring the wealth of talent within the Pakistani data science domain by downloading this dataset today! Kaggle dataset identifier: pakistan-data-talent <jupyter_script>import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from collections import Counter import nltk from nltk.corpus import stopwords nltk.download("stopwords") # **Load the CSV file using pandas :** data = pd.read_csv("/kaggle/input/pakistan-data-talent/Pakistan Data Talent.csv") # **Display the first 5 rows :** print(data.head()) # **Display information about the DataFrame :** display(data.info()) # **Check if there are any missing values :** print(data.isna().sum()) # **Display the distribution of locations :** sns.set_style("whitegrid") plt.figure(figsize=(12, 8)) ax = sns.countplot(x="Location", data=data) ax.set_xticklabels(ax.get_xticklabels(), rotation=90) plt.xlabel("Location", fontsize=12) plt.ylabel("Number of observations", fontsize=12) plt.title("Distribution of locations", fontsize=16) plt.tick_params(labelsize=10) plt.tight_layout() plt.show() # **Display the distribution of job titles :** top_headlines = data["Headline"].value_counts().head(25) sns.countplot(x="Headline", data=data, order=top_headlines.index) plt.xticks(rotation=90) plt.xlabel("Job Title") plt.ylabel("Number of Profiles") plt.title("Distribution of Job Titles") plt.show() # **Display the Word Frequency in Headlines :** # Remove float values in the Headline column data = data.dropna(subset=["Headline"]) # Remove stopwords stop_words = set(stopwords.words("english")) data["Headline"] = data["Headline"].apply( lambda x: " ".join( [word for word in str(x).split() if word.lower() not in stop_words] ) ) # Count word frequency in headlines word_freq = Counter(" ".join(data["Headline"]).split()).most_common(20) # Display the result as a graph plt.figure(figsize=(12, 6)) plt.bar([i[0] for i in word_freq], [i[1] for i in word_freq]) plt.xticks(rotation=90) plt.title("Word frequency in 
headlines") plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/319/129319337.ipynb
pakistan-data-talent
hskhawaja
[{"Id": 129319337, "ScriptId": 38447296, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14180659, "CreationDate": "05/12/2023 18:25:38", "VersionNumber": 1.0, "Title": "Exploratory Data Analysis of Pakistan Data Talent", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 66.0, "LinesInsertedFromPrevious": 66.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
[{"Id": 185255807, "KernelVersionId": 129319337, "SourceDatasetVersionId": 5670125}]
[{"Id": 5670125, "DatasetId": 3259472, "DatasourceVersionId": 5745628, "CreatorUserId": 938987, "LicenseName": "CC0: Public Domain", "CreationDate": "05/12/2023 12:59:27", "VersionNumber": 1.0, "Title": "Pakistan Data Talent", "Slug": "pakistan-data-talent", "Subtitle": "Tap into the Data Talent of Pakistan - Data Scientists, ML Engineers, BI Experts", "Description": "This comprehensive dataset features a collection of LinkedIn profiles belonging to talented data scientists hailing from Pakistan. It presents a valuable resource for researchers, recruiters, and data enthusiasts seeking insights into the diverse and growing field of data science within the Pakistani professional landscape.\n\nThe dataset includes the following key information for each profile: URL, full name, headline, and location. The profile URLs provide direct access to each individual's LinkedIn page, allowing users to explore their professional background, experiences, and expertise in more detail.\n\nWhether you are a recruiter looking to identify potential candidates, a researcher investigating trends and skills in the Pakistani data science community, or simply an enthusiast curious about the professionals driving data-driven innovation in Pakistan, this dataset will prove invaluable.\n\nBy making this dataset available on Kaggle, we aim to foster collaboration, knowledge sharing, and networking opportunities within the Pakistani data science community. We encourage users to leverage this dataset for various analytical and research purposes, such as demographic analysis, skillset mapping, or creating tailored outreach strategies.\n\nNote: The dataset contains publicly available information from LinkedIn profiles. We kindly request that users respect privacy and professional boundaries when utilizing this dataset, refraining from any unauthorized use or misuse of the provided information.\n\nStart exploring the wealth of talent within the Pakistani data science domain by downloading this dataset today!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3259472, "CreatorUserId": 938987, "OwnerUserId": 938987.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5689263.0, "CurrentDatasourceVersionId": 5764863.0, "ForumId": 3325009, "Type": 2, "CreationDate": "05/12/2023 12:59:27", "LastActivityDate": "05/12/2023", "TotalViews": 2838, "TotalDownloads": 204, "TotalVotes": 25, "TotalKernels": 2}]
[{"Id": 938987, "UserName": "hskhawaja", "DisplayName": "Hussain Shahbaz Khawaja", "RegisterDate": "03/02/2017", "PerformanceTier": 2}]
import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from collections import Counter import nltk from nltk.corpus import stopwords nltk.download("stopwords") # **Load the CSV file using pandas :** data = pd.read_csv("/kaggle/input/pakistan-data-talent/Pakistan Data Talent.csv") # **Display the first 5 rows :** print(data.head()) # **Display information about the DataFrame :** display(data.info()) # **Check if there are any missing values :** print(data.isna().sum()) # **Display the distribution of locations :** sns.set_style("whitegrid") plt.figure(figsize=(12, 8)) ax = sns.countplot(x="Location", data=data) ax.set_xticklabels(ax.get_xticklabels(), rotation=90) plt.xlabel("Location", fontsize=12) plt.ylabel("Number of observations", fontsize=12) plt.title("Distribution of locations", fontsize=16) plt.tick_params(labelsize=10) plt.tight_layout() plt.show() # **Display the distribution of job titles :** top_headlines = data["Headline"].value_counts().head(25) sns.countplot(x="Headline", data=data, order=top_headlines.index) plt.xticks(rotation=90) plt.xlabel("Job Title") plt.ylabel("Number of Profiles") plt.title("Distribution of Job Titles") plt.show() # **Display the Word Frequency in Headlines :** # Remove float values in the Headline column data = data.dropna(subset=["Headline"]) # Remove stopwords stop_words = set(stopwords.words("english")) data["Headline"] = data["Headline"].apply( lambda x: " ".join( [word for word in str(x).split() if word.lower() not in stop_words] ) ) # Count word frequency in headlines word_freq = Counter(" ".join(data["Headline"]).split()).most_common(20) # Display the result as a graph plt.figure(figsize=(12, 6)) plt.bar([i[0] for i in word_freq], [i[1] for i in word_freq]) plt.xticks(rotation=90) plt.title("Word frequency in headlines") plt.show()
false
1
597
5
960
597
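Editor's note: `sns.countplot` over every distinct `Location` produces far too many ticks to read; a minimal alternative that keeps only the most frequent locations (the top-15 cutoff is my choice):

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

data = pd.read_csv("/kaggle/input/pakistan-data-talent/Pakistan Data Talent.csv")

# Count profiles per location and keep the 15 largest
top_locations = data["Location"].value_counts().head(15)

plt.figure(figsize=(12, 6))
sns.barplot(x=top_locations.index, y=top_locations.values)
plt.xticks(rotation=45, ha="right")
plt.ylabel("Number of profiles")
plt.title("Top 15 locations")
plt.tight_layout()
plt.show()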
129319655
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import nltk nltk.download("punkt") nltk.download("stopwords") nltk.download("wordnet") nltk.download("vader_lexicon") # importing required modules import PyPDF2 for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: # creating a pdf file object name = "" name = "/kaggle/input/sentiment-analysis-mnb/" + filename print("\n", name) pdfFileObj = open(name, "rb") # creating a pdf reader object pdfReader = PyPDF2.PdfReader(pdfFileObj) # printing number of pages in pdf file n = len(pdfReader.pages) # creating a page object pageObj = pdfReader.pages[0] # extracting text from pages string = "" for i in range(n - 1): pageObj = pdfReader.pages[i] string = string + " " + pageObj.extract_text() # closing the pdf file object pdfFileObj.close() from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer # importing required modules from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer import PyPDF2 sentiment = SentimentIntensityAnalyzer() text_1 = "Unless suffering is the direct and immediate object of life, our existence must entirely fail of its aim. It is absurd to look upon the enormous amount of pain that abounds everywhere in the world, and originates in needs and necessities inseparable from life itself, as serving no purpose at all and the result of mere chance. Each separate misfortune, as it comes, seems, no doubt, to be something exceptional but misfortune in general is the rule." sent_1 = sentiment.polarity_scores(text_1) print(sent_1) # importing required modules from textblob import TextBlob import PyPDF2 import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: # creating a pdf file object name = "" name = "/kaggle/input/sentiment-analysis-mnb/" + filename pdfFileObj = open(name, "rb") # creating a pdf reader object pdfReader = PyPDF2.PdfReader(pdfFileObj) # printing number of pages in pdf file n = len(pdfReader.pages) # creating a page object pageObj = pdfReader.pages[0] # extracting text from pages string = "" for i in range(n - 1): pageObj = pdfReader.pages[i] string = string + " " + pageObj.extract_text() blob = TextBlob(string) print("\n", filename) print(blob.sentiment) # closing the pdf file object pdfFileObj.close()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/319/129319655.ipynb
null
null
[{"Id": 129319655, "ScriptId": 38324356, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14301685, "CreationDate": "05/12/2023 18:29:47", "VersionNumber": 1.0, "Title": "notebook8df56ce024", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 117.0, "LinesInsertedFromPrevious": 117.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import nltk nltk.download("punkt") nltk.download("stopwords") nltk.download("wordnet") nltk.download("vader_lexicon") # importing required modules import PyPDF2 for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: # creating a pdf file object name = "" name = "/kaggle/input/sentiment-analysis-mnb/" + filename print("\n", name) pdfFileObj = open(name, "rb") # creating a pdf reader object pdfReader = PyPDF2.PdfReader(pdfFileObj) # printing number of pages in pdf file n = len(pdfReader.pages) # creating a page object pageObj = pdfReader.pages[0] # extracting text from pages string = "" for i in range(n - 1): pageObj = pdfReader.pages[i] string = string + " " + pageObj.extract_text() # closing the pdf file object pdfFileObj.close() from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer # importing required modules from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer import PyPDF2 sentiment = SentimentIntensityAnalyzer() text_1 = "Unless suffering is the direct and immediate object of life, our existence must entirely fail of its aim. It is absurd to look upon the enormous amount of pain that abounds everywhere in the world, and originates in needs and necessities inseparable from life itself, as serving no purpose at all and the result of mere chance. Each separate misfortune, as it comes, seems, no doubt, to be something exceptional but misfortune in general is the rule." sent_1 = sentiment.polarity_scores(text_1) print(sent_1) # importing required modules from textblob import TextBlob import PyPDF2 import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: # creating a pdf file object name = "" name = "/kaggle/input/sentiment-analysis-mnb/" + filename pdfFileObj = open(name, "rb") # creating a pdf reader object pdfReader = PyPDF2.PdfReader(pdfFileObj) # printing number of pages in pdf file n = len(pdfReader.pages) # creating a page object pageObj = pdfReader.pages[0] # extracting text from pages string = "" for i in range(n - 1): pageObj = pdfReader.pages[i] string = string + " " + pageObj.extract_text() blob = TextBlob(string) print("\n", filename) print(blob.sentiment) # closing the pdf file object pdfFileObj.close()
false
0
832
0
832
832
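Editor's note: the PDF-reading loop above is written out twice, once for VADER and once for TextBlob; a minimal consolidation into one helper (the `extract_text` name is mine, and unlike the original `range(n - 1)` loop it reads every page, which I assume was the intent):

import os
import PyPDF2
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

def extract_text(path):
    # Concatenate the text of all pages in one PDF
    with open(path, "rb") as f:
        reader = PyPDF2.PdfReader(f)
        return " ".join(page.extract_text() or "" for page in reader.pages)

analyzer = SentimentIntensityAnalyzer()
root = "/kaggle/input/sentiment-analysis-mnb"
for filename in os.listdir(root):
    text = extract_text(os.path.join(root, filename))
    print(filename)
    print("  VADER:", analyzer.polarity_scores(text))
    print("  TextBlob:", TextBlob(text).sentiment)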
129031454
# # Introduction to AI # ## Project initialization import numpy as np import pandas as pd df = pd.read_csv( "/kaggle/input/epidemiological-data-from-the-covid-19-outbreak/data.csv" ) # **** # ## Dataset analysis print("Data types :") df.info()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/031/129031454.ipynb
null
null
[{"Id": 129031454, "ScriptId": 38354681, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15037675, "CreationDate": "05/10/2023 12:51:08", "VersionNumber": 1.0, "Title": "notebookb5b2a91504", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 76.0, "LinesInsertedFromPrevious": 76.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# # Introduction to AI # ## Project initialization import numpy as np import pandas as pd df = pd.read_csv( "/kaggle/input/epidemiological-data-from-the-covid-19-outbreak/data.csv" ) # **** # ## Dataset analysis print("Data types :") df.info()
false
0
89
0
89
89
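Editor's note: the analysis stops right after `df.info()`; a hedged sketch of the usual next EDA steps on the same DataFrame (kept schema-agnostic, since the notebook never shows the columns):

# Summary statistics for the numeric columns
print(df.describe())

# Missing values per column
print(df.isna().sum())

# First rows as a quick sanity check
print(df.head())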
129031792
import numpy as np import tensorflow as tf import tensorflow as tf import urllib import zipfile from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.optimizers import RMSprop import json import tensorflow as tf import numpy as np import urllib from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences import tensorflow_hub as hub import pandas as pd import tensorflow as tf def solution_model(): xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) ys = np.array([5.0, 6.0, 7.0, 8.0, 9.0, 10.0], dtype=float) # YOUR CODE HERE model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[1])]) model.compile(loss="mean_squared_error", optimizer="sgd") model.fit(xs, ys, epochs=1000) return model if __name__ == "__main__": model = solution_model() model.save("mymodel.h5") def solution_model(): fashion_mnist = tf.keras.datasets.fashion_mnist # YOUR CODE HERE (training_images, training_labels), ( val_images, val_label, ) = fashion_mnist.load_data() training_images = training_images / 255.0 val_images = val_images / 255.0 training_images = np.expand_dims(training_images, axis=3) val_images = np.expand_dims(val_images, axis=3) model = tf.keras.Sequential( [ tf.keras.layers.Conv2D( 14, (3, 3), activation="relu", input_shape=(28, 28, 1) ), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation="relu"), tf.keras.layers.Dense(10, activation="softmax"), ] ) model.compile( optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["acc"] ) model.fit( training_images, training_labels, validation_data=(val_images, val_label), epochs=5, ) return model if __name__ == "__main__": model = solution_model() model.save("mymodel.h5") def solution_model(): _TRAIN_URL = ( "https://storage.googleapis.com/download.tensorflow.org/data/horse-or-human.zip" ) _TEST_URL = "https://storage.googleapis.com/download.tensorflow.org/data/validation-horse-or-human.zip" urllib.request.urlretrieve(_TRAIN_URL, "horse-or-human.zip") local_zip = "horse-or-human.zip" zip_ref = zipfile.ZipFile(local_zip, "r") zip_ref.extractall("tmp/horse-or-human/") zip_ref.close() urllib.request.urlretrieve(_TEST_URL, "testdata.zip") local_zip = "testdata.zip" zip_ref = zipfile.ZipFile(local_zip, "r") zip_ref.extractall("tmp/testdata/") zip_ref.close() training_data = "tmp/horse-or-human/" val_data = "tmp/testdata/" train_datagen = ImageDataGenerator( rescale=1.0 / 255, ) validation_datagen = ImageDataGenerator(rescale=1.0 / 255.0) train_generator = train_datagen.flow_from_directory( training_data, target_size=(300, 300), batch_size=128, class_mode="binary" ) validation_generator = validation_datagen.flow_from_directory( val_data, target_size=(300, 300), batch_size=64, class_mode="binary" ) model = tf.keras.models.Sequential( [ # Note the input shape specified on your first layer must be (300,300,3) # Your Code here tf.keras.layers.Conv2D( 16, (3, 3), activation="relu", input_shape=(300, 300, 3) ), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(32, (3, 3), activation="relu"), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3, 3), activation="relu"), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3, 3), activation="relu"), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3, 3), activation="relu"), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation="relu"), # This is the last layer. 
You should not change this code. tf.keras.layers.Dense(1, activation="sigmoid"), ] ) model.compile( loss="binary_crossentropy", optimizer=RMSprop(learning_rate=0.001), metrics=["accuracy"] ) model.fit( train_generator, epochs=10, verbose=1, validation_data=validation_generator ) return model if __name__ == "__main__": model = solution_model() model.save("mymodel2.h5") def solution_model(): url = "https://storage.googleapis.com/download.tensorflow.org/data/sarcasm.json" urllib.request.urlretrieve(url, "sarcasm.json") # DO NOT CHANGE THIS CODE OR THE TESTS MAY NOT WORK vocab_size = 1000 embedding_dim = 16 max_length = 120 trunc_type = "post" padding_type = "post" oov_tok = "<OOV>" training_size = 20000 sentences = [] labels = [] # YOUR CODE HERE with open("sarcasm.json", "r") as f: data = json.load(f) for text in data: sentences.append(text["headline"]) labels.append(text["is_sarcastic"]) train_sentences = sentences[:training_size] test_sentences = sentences[training_size:] train_labels = labels[:training_size] test_labels = labels[training_size:] tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok) tokenizer.fit_on_texts(train_sentences) # Sequences and padding train_sequences = tokenizer.texts_to_sequences(train_sentences) train_padded = pad_sequences( train_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type ) test_sequences = tokenizer.texts_to_sequences(test_sentences) test_padded = pad_sequences( test_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type ) train_labels = np.array(train_labels) test_labels = np.array(test_labels) model = tf.keras.Sequential( [ # YOUR CODE HERE. KEEP THIS OUTPUT LAYER INTACT OR TESTS MAY FAIL tf.keras.layers.Embedding( vocab_size, embedding_dim, input_length=max_length ), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)), tf.keras.layers.Dense(24, activation="relu"), tf.keras.layers.Dense(1, activation="sigmoid"), ] ) model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["acc"]) model.fit( train_padded, train_labels, epochs=10, validation_data=(test_padded, test_labels), ) return model if __name__ == "__main__": model = solution_model() model.save("mymodel6.h5") def normalize_series(data, min, max): data = data - min data = data / max return data # This function is used to map the time series dataset into windows of # features and respective targets, to prepare it for training and validation. # The first element of the first window will be the first element of # the dataset. # # Consecutive windows are constructed by shifting the starting position # of the first window forward, one at a time (indicated by shift=1). # # For a window of n_past number of observations of the time # indexed variable in the dataset, the target for the window is the next # n_future number of observations of the variable, after the # end of the window. # DO NOT CHANGE THIS. def windowed_dataset(series, batch_size, n_past=10, n_future=10, shift=1): ds = tf.data.Dataset.from_tensor_slices(series) ds = ds.window(size=n_past + n_future, shift=shift, drop_remainder=True) ds = ds.flat_map(lambda w: w.batch(n_past + n_future)) ds = ds.map(lambda w: (w[:n_past], w[n_past:])) return ds.batch(batch_size).prefetch(1) # This function loads the data from the CSV file, normalizes the data and # splits the dataset into train and validation data. It also uses # windowed_dataset() to split the data into windows of observations and # targets. Finally it defines, compiles and trains a neural network.
This # function returns the final trained model. # COMPLETE THE CODE IN THIS FUNCTION def solution_model(): # DO NOT CHANGE THIS CODE # Reads the dataset. df = pd.read_csv( "Weekly_U.S.Diesel_Retail_Prices.csv", infer_datetime_format=True, index_col="Week of", header=0, ) # Number of features in the dataset. We use all features as predictors to # predict all features of future time steps. N_FEATURES = len(df.columns) # DO NOT CHANGE THIS # Normalizes the data data = df.values data = normalize_series(data, data.min(axis=0), data.max(axis=0)) # Splits the data into training and validation sets. SPLIT_TIME = int(len(data) * 0.8) # DO NOT CHANGE THIS x_train = data[:SPLIT_TIME] x_valid = data[SPLIT_TIME:] # DO NOT CHANGE THIS CODE tf.keras.backend.clear_session() tf.random.set_seed(42) # DO NOT CHANGE BATCH_SIZE IF YOU ARE USING STATEFUL LSTM/RNN/GRU. # THE TEST WILL FAIL TO GRADE YOUR SCORE IN SUCH CASES. # In other cases, it is advised not to change the batch size since it # might affect your final scores. While setting it to a lower size # might not do any harm, higher sizes might affect your scores. BATCH_SIZE = 32 # ADVISED NOT TO CHANGE THIS # DO NOT CHANGE N_PAST, N_FUTURE, SHIFT. The tests will fail to run # on the server. # Number of past time steps based on which future observations should be # predicted N_PAST = 10 # DO NOT CHANGE THIS # Number of future time steps which are to be predicted. N_FUTURE = 10 # DO NOT CHANGE THIS # By how many positions the window slides to create a new window # of observations. SHIFT = 1 # DO NOT CHANGE THIS # Code to create windowed train and validation datasets. train_set = windowed_dataset( series=x_train, batch_size=BATCH_SIZE, n_past=N_PAST, n_future=N_FUTURE, shift=SHIFT, ) valid_set = windowed_dataset( series=x_valid, batch_size=BATCH_SIZE, n_past=N_PAST, n_future=N_FUTURE, shift=SHIFT, ) # Code to define your model. encoder_inputs = tf.keras.layers.Input(shape=(N_PAST, N_FEATURES)) encoder_l1 = tf.keras.layers.LSTM(100, return_state=True) encoder_outputs1 = encoder_l1(encoder_inputs) encoder_states1 = encoder_outputs1[1:] # decoder_inputs = tf.keras.layers.RepeatVector(N_FUTURE)(encoder_outputs1[0]) # decoder_l1 = tf.keras.layers.LSTM(100, return_sequences=True)( decoder_inputs, initial_state=encoder_states1 ) decoder_outputs1 = tf.keras.layers.TimeDistributed( tf.keras.layers.Dense(N_FEATURES) )(decoder_l1) # model = tf.keras.models.Model(encoder_inputs, decoder_outputs1) # Code to train and compile the model optimizer = tf.keras.optimizers.SGD(learning_rate=1e-5, momentum=0.9) model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) model.fit(train_set, validation_data=valid_set, epochs=30) return model if __name__ == "__main__": model = solution_model() model.save("c5q12.h5")
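Editor's note: the windowing comments above are easier to verify on a toy series; a minimal sketch, assuming the `windowed_dataset` helper defined earlier in this script is in scope:

import numpy as np
import tensorflow as tf

# Toy series: 30 time steps, 2 features
series = np.arange(60, dtype="float32").reshape(30, 2)

ds = windowed_dataset(series, batch_size=4, n_past=10, n_future=10, shift=1)
for x, y in ds.take(1):
    # Each window: 10 past steps as input, the next 10 steps as target
    print(x.shape, y.shape)  # expected: (4, 10, 2) (4, 10, 2)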
<jupyter_start><jupyter_text>The shortest path data
This is a small project that uses optimization algorithms to find the five shortest paths. However, every spot has a limit; the balls at any spot cannot exceed 100.
Kaggle dataset identifier: ga-optimization
<jupyter_script>import pandas as pd
import gurobipy as gp
from gurobipy import *
import math as m
import random as rand
import networkx as nx
import matplotlib.pyplot as plt

data = pd.read_excel(r"/kaggle/input/ga-optimization/AI term project.xlsx")

# #### First, shrink the problem down to only the first few points
data_5 = data[:5]
data_5


def distance(x1, x2, y1, y2):
    dis = m.pow(m.pow((x1 - x2), 2) + m.pow((y1 - y2), 2), 0.5)
    return round(dis, 4)


distance(3, 4, 1, 2)

all_nodes_connect = []
for node_x in range(0, len(data_5["X"])):
    for node_y in range(0, len(data_5["Y"])):
        all_nodes_connect.append((node_x, node_y))

for_trans_matrix = list()
for nodes in all_nodes_connect:
    if nodes[0] != nodes[1]:
        dis = distance(
            data_5["X"].iloc[nodes[0]],
            data_5["X"].iloc[nodes[1]],
            data_5["Y"].iloc[nodes[0]],
            data_5["Y"].iloc[nodes[1]],
        )
        for_trans_matrix.append([nodes, dis])
# for_trans_matrix

# ### Set the parameters
# * cost matrix
# * N : the number of nodes
# * K : the types of vehicles
# * C : capacity of each vehicle
# * M : the number of all vehicles
cost_matrix = tupledict(for_trans_matrix)
# N = list(range(0, len(data["寶可夢座標點"])))
N = list(range(0, len(data_5["寶可夢座標點"])))
K = [1]
C = {1: 100}
# M = {1: 5}
model = Model(name="VRP")
cost_matrix.keys()[1][0], cost_matrix.keys()[1][1]
cost_matrix

# ## Add the decision variables
X = {}
for i, one in zip(cost_matrix.keys(), range(0, len(cost_matrix.keys()))):
    if i[0] != i[1]:
        index_ = "x" + str(i[0]) + "," + str(i[1])
        # print(index_)
        # All the variables are stored in one dict here, so any later
        # reference to a variable has to go through the dict.
        X[i] = model.addVar(vtype=GRB.BINARY, name=index_)
model.update()
decision_list = list(X.values())
# decision_list

# nx.Graph() is an undirected graph
graph = nx.DiGraph()  # directed graph
color = list()
for node in N:
    if node == 0:
        graph.add_node(node)
        color.append("red")
    else:
        graph.add_node(node)
        color.append("gray")
for key, values in zip(cost_matrix.keys(), cost_matrix.values()):
    graph.add_edge(key[0], key[1], weight=values)
weight = nx.get_edge_attributes(graph, "weight")
pos = nx.get_node_attributes(graph, "pos")
nx.draw(graph, node_color=color, with_labels=True)
# dj = nx.shortest_path(graph, source=1, target=0)
# print(dj)
# plt.figure(figsize=(15, 15))
# nx.draw_networkx_edge_labels(graph, pos, edge_labels=weight)  # draw each node's edge labels

decision_list[0]

# ## Add the objective function
model.setObjective(
    gp.quicksum(
        cost_matrix.values()[cost] * decision
        for decision, cost in zip(decision_list, range(0, len(cost_matrix)))
    ),
    GRB.MINIMIZE,
)  # Remember: the objective must be passed in exactly this form!
model.update()
decision_list[0]
# model.addConstr(gp.quicksum(decision * ))

# ### Write the current model formulation to a file (it can be opened as plain text)
model.write("/kaggle/working/vrp_model.lp")
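# The notebook stops before adding routing constraints (the model.addConstr
# line above is left unfinished). As a hedged sketch only, not the author's
# code: one common next step for a VRP/TSP-style model is the pair of degree
# constraints, so every node is entered exactly once and left exactly once.
# This reuses X and N defined above; the constraint names are illustrative,
# and a full VRP would still need capacity and subtour-elimination constraints.
for j in N:
    model.addConstr(
        gp.quicksum(X[i, j] for i in N if i != j) == 1, name=f"in_{j}"
    )  # exactly one incoming arc per node
for i in N:
    model.addConstr(
        gp.quicksum(X[i, j] for j in N if j != i) == 1, name=f"out_{i}"
    )  # exactly one outgoing arc per node
model.update()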
# loading libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import activations

# loading datasets
path_train = "/kaggle/input/icr-identify-age-related-conditions/train.csv"
path_test = "/kaggle/input/icr-identify-age-related-conditions/test.csv"
path_submis = "/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
path_greeks = "/kaggle/input/icr-identify-age-related-conditions/greeks.csv"
train = pd.read_csv(path_train).drop(columns="Id")
test = pd.read_csv(path_test).drop(columns="Id")
greeks = pd.read_csv(path_greeks)
train["EJ"] = train["EJ"].map({"A": 0, "B": 1})
test["EJ"] = test["EJ"].map({"A": 0, "B": 1})

# shape of each dataset
print(f"Shape of the train data : {train.shape}")
print(f"Shape of the test data : {test.shape}")

# checking missing values in the train dataset
train_miss = train.isnull().sum()
print("Column Count")
for index, row in train_miss[train_miss > 0].items():
    print(f"{index} {row}")

# ***We can use visualization techniques to discover missing values. A heatmap of
# train.isnull() works well here: each light cell marks a missing value in that
# row and column.***
plt.figure(figsize=(16, 14))
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap="PuBuGn")
plt.show()

# ***There are some common ways to handle missing values in a pandas DataFrame:
# fillna(), interpolate() and SimpleImputer from sklearn.impute***

# fill missing values with the mean of the column
train_mean_filled = train.copy()
train_mean_filled.fillna(train_mean_filled.mean(), inplace=True)

# correlation coefficient of each column with the target
corr_target = train_mean_filled.corrwith(train_mean_filled["Class"])[:-1].sort_values(
    ascending=False
)
plt.figure(figsize=(10, 10))
sns.barplot(y=corr_target.index, x=corr_target.values)
plt.show()

# interpolate missing values using polynomial interpolation (order 5);
# note the result has to be assigned back, interpolate() is not in-place
train_interpolate = train.copy()
train_interpolate = train_interpolate.interpolate(method="polynomial", order=5)

# correlation coefficient of each column with the target
corr_target = train_interpolate.corrwith(train_interpolate["Class"])[:-1].sort_values(
    ascending=False
)
plt.figure(figsize=(10, 10))
sns.barplot(y=corr_target.index, x=corr_target.values)
plt.show()

from sklearn.impute import SimpleImputer

# create an imputer object and fit it to the data
imputer = SimpleImputer(strategy="mean")
imputer.fit(train)
# transform the data and replace missing values
train_imputed = pd.DataFrame(imputer.transform(train), columns=train.columns)

# correlation coefficient of each column with the target
corr_target = train_imputed.corrwith(train_imputed["Class"])[:-1].sort_values(
    ascending=False
)
plt.figure(figsize=(10, 10))
sns.barplot(y=corr_target.index, x=corr_target.values)
plt.show()

corr = train.iloc[:, 1:].corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
plt.figure(figsize=(16, 14))
ax = sns.heatmap(
    corr,
    vmin=-1,
    vmax=1,
    center=0,
    cmap=sns.diverging_palette(20, 220, n=200),
    square=True,
    mask=mask,
)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment="right")

labels = ["Class 0", "Class 1"]
sizes = [train["Class"].tolist().count(0), train["Class"].tolist().count(1)]
explode = (0, 0.1)
fig, ax = plt.subplots()
ax.pie(
    sizes,
    explode=explode,
    labels=labels,
    autopct="%1.2f%%",
    shadow=True,
    startangle=180,
)
plt.show()

# Condition the regression fit on another variable and represent it using color
sns.lmplot(data=train_mean_filled, x="AB", y="AZ", hue="Class")
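# The pie chart above shows the two classes are imbalanced. As a hedged,
# minimal sketch (my addition, not from the original notebook): when this data
# is eventually split for modelling, stratifying on the target keeps the class
# ratio identical in both partitions. It reuses train_mean_filled from above;
# the names X_tr/X_val are illustrative.
from sklearn.model_selection import train_test_split

X = train_mean_filled.drop(columns="Class")
y = train_mean_filled["Class"]
X_tr, X_val, y_tr, y_val = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42
)
# Both partitions now preserve the original Class proportions.
print(y_tr.mean(), y_val.mean())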
import numpy as np
import pandas as pd
from datasets import load_dataset
import nltk
from nltk.tokenize import sent_tokenize
from transformers import pipeline, set_seed
from datasets import load_metric

# # 1. Dataset
# The canonical summarization dataset: the `CNN/DailyMail corpus`.
# - about 300,000 pairs
# - news article -- reference summary
# - built from the bullet-point highlights CNN and the Daily Mail attach to their articles
# - the summaries are abstractive rather than extractive: they consist of new sentences, not simple excerpts
dataset = load_dataset("cnn_dailymail", version="3.0.0")
print(f'Features: {dataset["train"].column_names}')
sample = dataset["train"][1]
print(
    f"""
Article (excerpt of 500 characters, total length: {len(sample["article"])}):
"""
)
print(sample["article"][:500])
print(f'\nSummary (length: {len(sample["highlights"])}):')
print(sample["highlights"])

# The articles are very long; an input article can be around 17 times the length
# of its summary. Long inputs are a real challenge for transformers.
# - Mitigation: even the end of an article carries some information, but we still
#   have to truncate the input to the chosen model's maximum number of tokens.

# # 2. pipeline
sample_text = dataset["train"][1]["article"][:2000]
summaries = {}
nltk.download("punkt")
str_ = "The U.S. are a country. The U.N. is an organization."
sent_tokenize(str_)

# ## 2.1 baseline
# Simply use the first three sentences of the article as the summary.
def three_sentence_summary(text):
    return "\n".join(sent_tokenize(text)[:3])


summaries["baseline"] = three_sentence_summary(sample_text)

# ## 2.2 gpt-2
# To make the model summarize, append `\nTL;DR:\n` to the prompt
# - "too long; didn't read"
# - widely used on platforms like Reddit to introduce a short version of a long post
set_seed(42)
pipe_ = pipeline("text-generation", model="gpt2")
gpt2_query = sample_text + "\nTL;DR:\n"
pipe_out = pipe_(gpt2_query, max_length=512, clean_up_tokenization_spaces=True)
summaries["gpt2"] = "\n".join(
    sent_tokenize(pipe_out[0]["generated_text"][len(gpt2_query):])
)

# ## 2.3 T5 (`Text-to-Text Transfer Transformer`)
# Trained on a mixture of data:
# - unsupervised data: reconstructing masked words
# - supervised data: several task-specific objectives, e.g.
#   - text summarization, prefixed with "summarize:"
#   - translation, prefixed with e.g. "translate English to German:"
pipe_ = pipeline("summarization", model="t5-large")
pipe_out = pipe_(sample_text)
summaries["t5"] = "\n".join(sent_tokenize(pipe_out[0]["summary_text"]))

# ## 2.4 BART
# > encoder-decoder architecture
# Combines the pretraining approaches of BERT and GPT-2
pipe_ = pipeline("summarization", model="facebook/bart-large-cnn")
pipe_out = pipe_(sample_text)
summaries["bart"] = "\n".join(sent_tokenize(pipe_out[0]["summary_text"]))

# ## 2.5 PEGASUS
# Also an encoder-decoder architecture.
# To find a pretraining objective closer to summarization than plain language
# modelling, the authors automatically identify, in a very large corpus, the
# sentences that contain most of the content of their surrounding paragraphs
# (using a summarization evaluation metric as a heuristic for content overlap)
# and pretrain PEGASUS to reconstruct those sentences, yielding a
# state-of-the-art model for text summarization.
pipe_ = pipeline("summarization", model="google/pegasus-cnn_dailymail")
pipe_out = pipe_(sample_text)
summaries["pegasus"] = pipe_out[0]["summary_text"].replace(" .<n>", ".\n")

# ## Comparison
print("GROUND TRUTH")
print(dataset["train"][1]["highlights"])
print("")
for model_name in summaries:
    print(model_name.upper())
    print("--" * 25)
    print(summaries[model_name])
    print("")

# # 3. Metrics for evaluating generated text
# ## 3.1 BLEU (a `precision-based metric`)
# - Precision view: if m of the words in the generated sentence appear in the
#   reference (of n words), then $bleu=\frac{m}{n}$
# - Problem 1:
#   - if the generation repeats a word that occurs in the reference, the score is inflated
#   - the authors' fix: a word is only counted as many times as it occurs in the reference
#   - example: ref - "the cat is on the mat", gen - "the the the the the the"
#     - $P_{vanilla}=\frac{6}{6}$, $P_{mod}=\frac{2}{6}$
# - Fix for problem 1: `clip`
#   - the count of an `n-gram` is capped at the number of times it occurs in the reference sentence
# $$p_n=\frac{ \sum_{geSnt \in C}\sum_{n\text{-}gram \in geSnt} Count_{clip}(n\text{-}gram) }{ \sum_{geSnt \in C}\sum_{n\text{-}gram \in geSnt} Count(n\text{-}gram) }$$
# - Problem 2:
#   - because this is a precision-style score, it clearly favours short candidates and underrates longer generations
# - Fix for problem 2: the `brevity penalty`
#   - $BR = min(1, e^{1 - \frac{l_{ref}}{l_{gen}}} )$: equals 1 when the generation is longer than the reference, and lies in $(0, 1)$ when it is shorter
# **Final formula:**
# $$BLEU\text{-}N = BR \cdot \left(\prod_{n=1}^N p_n\right)^{1/N}$$
# Example: computing `BLEU-4`
# - ref - "the cat sat on the mat"
# - gen - "the cat the cat is on the mat"
# - **BR**: $BR=min(1, e^{1-6/8})=1$
# - **n=1**
#   - 1-grams: ref: {"the", "cat", "sat", "on", "mat"}  gen: {"the", "cat", "is", "on", "mat"}
#   - clip: $count_{clip}("the") = 2$, $count_{clip}("cat") = 1$, $count_{clip}("is") = 0$, over the 8 unigrams of the generation
#   - $p_1 = \frac{5}{8}$
# - **n=2**
#   - 2-grams: ref: {"the cat", "cat sat", "sat on", "on the", "the mat"}  gen: {"the cat", "cat the", "cat is", "is on", "on the", "the mat"}
#   - $p_2 = \frac{3}{7}$
# - **n=3**
#   - 3-grams: ref: {"the cat sat", "cat sat on", "sat on the", "on the mat"}  gen: {"the cat the", "cat the cat", "the cat is", "cat is on", "is on the", "on the mat"}
#   - $p_3 = \frac{1}{6}$
# - **n=4**
#   - 4-grams: ref: {"the cat sat on", "cat sat on the", "sat on the mat"}  gen: {"the cat the cat", "cat the cat is", "the cat is on", "cat is on the", "is on the mat"}
#   - $p_4 = \frac{0}{5}$
# - **BLEU-4**: $1 \cdot (\frac{5}{8} \cdot \frac{3}{7} \cdot \frac{1}{6} \cdot \frac{0}{5})^{1/4}=0$

bleu_metric = load_metric("sacrebleu")
bleu_metric.add(
    prediction="the cat the cat is on the mat", reference=["the cat sat on the mat"]
)
results = bleu_metric.compute(smooth_method="floor", smooth_value=0)
results
<jupyter_start><jupyter_text>Mushroom Classification
### Context
Although this dataset was originally contributed to the UCI Machine Learning repository nearly 30 years ago, mushroom hunting (otherwise known as "shrooming") is enjoying new peaks in popularity. Learn which features spell certain death and which are most palatable in this dataset of mushroom characteristics. And how certain can your model be?
### Content
This dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family Mushroom drawn from The Audubon Society Field Guide to North American Mushrooms (1981). Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like "leaflets three, let it be'' for Poisonous Oak and Ivy.
- **Time period**: Donated to UCI ML 27 April 1987
### Inspiration
- What types of machine learning models perform best on this dataset?
- Which features are most indicative of a poisonous mushroom?
Kaggle dataset identifier: mushroom-classification
<jupyter_script># Hanming Jing
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_curve, auc, f1_score

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

from sklearn.preprocessing import LabelEncoder

df = pd.read_csv("../input/mushroom-classification/mushrooms.csv")
encoder = LabelEncoder()
df = df.apply(encoder.fit_transform)
df.head()

X = df.drop(columns=["class"])
y = df["class"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
print("X_train = ", X_train.shape)
print("y_train = ", y_train.shape)
print("X_test = ", X_test.shape)
print("y_test = ", y_test.shape)

# Initialize the models
rfc = RandomForestClassifier(n_estimators=100, random_state=0)
gnb = GaussianNB()
# Train the models
rfc.fit(X_train, y_train)
gnb.fit(X_train, y_train)
# Predicted probabilities for the positive class
rfc_probs = rfc.predict_proba(X_test)[:, 1]
gnb_probs = gnb.predict_proba(X_test)[:, 1]
# Compute the ROC curves
rfc_fpr, rfc_tpr, _ = roc_curve(y_test, rfc_probs)
gnb_fpr, gnb_tpr, _ = roc_curve(y_test, gnb_probs)
# Compute the AUC
rfc_auc = auc(rfc_fpr, rfc_tpr)
gnb_auc = auc(gnb_fpr, gnb_tpr)

plt.figure(figsize=(8, 6))
plt.plot(rfc_fpr, rfc_tpr, label="Random Forest (AUC = %0.2f)" % rfc_auc)
plt.plot(gnb_fpr, gnb_tpr, label="Gaussian NB (AUC = %0.2f)" % gnb_auc)
plt.plot([0, 1], [0, 1], "k--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver Operating Characteristic")
plt.legend(loc="lower right")
plt.show()

rfc_preds = rfc.predict(X_test)
gnb_preds = gnb.predict(X_test)
rfc_f1 = f1_score(y_test, rfc_preds)
gnb_f1 = f1_score(y_test, gnb_preds)
print("Random Forest F1 Score: %.2f" % rfc_f1)
print("Gaussian NB F1 Score: %.2f" % gnb_f1)
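# The dataset prompt above asks which features are most indicative of a
# poisonous mushroom. As a hedged follow-up sketch (my addition, not the
# original author's code), the fitted random forest already exposes an answer
# through its impurity-based feature importances:
importances = pd.Series(rfc.feature_importances_, index=X.columns).sort_values(
    ascending=False
)
print(importances.head(10))  # the ten most informative encoded features
importances.head(10).plot(kind="barh")
plt.title("Top 10 Random Forest feature importances")
plt.show()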
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plottingutility
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
import typing

ROWS, COLS = 3, 2
TARGET = "Class"
KURT_THRESHOLD = 0.5

df_train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
df_test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
del df_train["Id"]
del df_test["Id"]
y_train = df_train[TARGET]


def get_upper_bound_by_map(serie: pd.Series) -> np.float64:
    # The original dict listed "AH" twice (100 and 88); Python keeps the last
    # value of a duplicated key, so only 88 is retained here.
    name_to_upper_bound = {
        "AB": 1,
        "AF": 8000,
        "AH": 88,
        "AM": 100,
        "AR": 25,
        "AX": 10,
        "AY": 0.025578,
    }
    return name_to_upper_bound[serie.name]


def get_upper_bound_by_calculation(serie: pd.Series) -> np.float64:
    return serie.mean() + serie.std()


def squash_tail(
    serie: pd.Series,
    col,
    upper_bound_calculation: typing.Callable[[pd.Series], np.float64] = None,
):
    if upper_bound_calculation is None:
        upper_bound_calculation = get_upper_bound_by_calculation
    if serie.dtype != "float64":
        return serie
    fig, ax = plt.subplots(1, 2, figsize=(20, 1 * 4))
    ax = ax.flatten()
    if serie.kurt() > KURT_THRESHOLD:
        plottingutility.plot_histogram(
            serie, current_axis=ax[0], title=f"{col} before clipping"
        )
        upper_bound = upper_bound_calculation(serie)
        serie = np.log(serie.clip(0, upper_bound))
        plottingutility.plot_histogram(
            serie, current_axis=ax[1], title=f"{col} after clipping"
        )
    else:
        plottingutility.plot_histogram(
            serie, current_axis=ax[0], title=f"{col} before clipping"
        )
        plottingutility.plot_histogram(
            serie, current_axis=ax[1], title=f"{col} after clipping"
        )
    return serie


df_train_copy_one = df_train.copy()
df_train_copy_two = df_train.copy()
for col in df_train.columns[:6]:
    df_train_copy_one[col] = squash_tail(df_train_copy_one[col], col)
    df_train_copy_two[col] = squash_tail(
        df_train_copy_two[col], col, get_upper_bound_by_map
    )

fig, axs = plt.subplots(
    ROWS, COLS, figsize=(17, 17), gridspec_kw={"wspace": 0.25, "hspace": 0.25}
)
plt.subplots_adjust(wspace=0.25, hspace=0.25)
row_idx = 0
col_idx = 0
for idx, start in enumerate([0, 10, 20, 30, 40, 50]):
    if idx % COLS == 0 and idx != 0:
        row_idx += 1
        col_idx = 0
    if start != 50:
        end = start + 10
    else:
        end = start + 7
    plottingutility.plot_correlations(
        df_train[[*df_train.columns[start:end], TARGET]], axs[row_idx, col_idx]
    )
    col_idx += 1

df_train["EJ"] = df_train["EJ"].factorize()[0]
df_test["EJ"] = df_test["EJ"].factorize()[0]

TAIL_DISTRIBUTED_FEATURES = [
    "AB",
    "AF",
    "AH",
    "AM",
    "AR",
    "AX",
    "AY",
    "AZ",
    "BC",
    "BP",
    "BR",
    "BZ",
    "CB",
    "CC",
]


def qcut_columns(col):
    df_train[f"{col}_cut"] = pd.qcut(df_train[f"{col}"], q=4, labels=False)
    return df_train


for col in ["AM", "CB", "BR"]:
    df_train = qcut_columns(col)

df_original = df_train[TAIL_DISTRIBUTED_FEATURES].copy()
df_train[TAIL_DISTRIBUTED_FEATURES] = np.log(df_train[TAIL_DISTRIBUTED_FEATURES])
scaled_rows = MinMaxScaler().fit_transform(df_train[df_test.columns])
scaled_frame = pd.DataFrame(data=scaled_rows, columns=df_test.columns)
# plt_features is assumed to come from the notebook's local plotting helpers
# (e.g. plottingutility); it is never defined in this file.
plt_features([scaled_frame], scaled_frame.columns)
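# To illustrate why squash_tail() gates on kurtosis, here is a tiny hedged
# demo (my addition) on synthetic data: a heavy right tail drives kurtosis
# well above KURT_THRESHOLD, and clipping at mean + std followed by log makes
# the distribution far more symmetric.
rng = np.random.default_rng(0)
skewed = pd.Series(rng.lognormal(mean=0.0, sigma=1.0, size=10_000))
print("kurtosis before:", round(skewed.kurt(), 2))  # typically well above 0.5
clipped = np.log(skewed.clip(0, skewed.mean() + skewed.std()))
print("kurtosis after:", round(clipped.kurt(), 2))  # much closer to 0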
import glob

glob.glob("../mAP/output/*")

with open("../mAP/output/output.txt", "r") as file:
    data = file.read()
mAP = float(
    data[data.find("mAP = ") + 6 : data.find("# Number of ground-truth ") - 3]
)
print(mAP)
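# A hedged alternative (my addition): the slice arithmetic above silently
# misbehaves if either marker string moves in the file. A regex over the same
# assumed "mAP = <number>" layout fails loudly instead.
import re

match = re.search(r"mAP\s*=\s*([\d.]+)", data)
if match is None:
    raise ValueError("no 'mAP = <number>' line found in output.txt")
print(float(match.group(1)))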